/**
 * D header file for perf_event_open system call.
 *
 * Converted from linux userspace header, comments included.
 *
 * Authors: Max Haughton
 */
module core.sys.linux.perf_event;
version (linux):
extern (C):
@nogc:
nothrow:

import core.sys.posix.sys.ioctl;
import core.sys.posix.unistd;

version (HPPA) version = HPPA_Any;
version (HPPA64) version = HPPA_Any;
version (PPC) version = PPC_Any;
version (PPC64) version = PPC_Any;
version (RISCV32) version = RISCV_Any;
version (RISCV64) version = RISCV_Any;
version (S390) version = IBMZ_Any;
version (SPARC) version = SPARC_Any;
version (SPARC64) version = SPARC_Any;
version (SystemZ) version = IBMZ_Any;

version (X86_64)
{
    version (D_X32)
        enum __NR_perf_event_open = 0x40000000 + 298;
    else
        enum __NR_perf_event_open = 298;
}
else version (X86)
{
    enum __NR_perf_event_open = 336;
}
else version (ARM)
{
    enum __NR_perf_event_open = 364;
}
else version (AArch64)
{
    enum __NR_perf_event_open = 241;
}
else version (HPPA_Any)
{
    enum __NR_perf_event_open = 318;
}
else version (IBMZ_Any)
{
    enum __NR_perf_event_open = 331;
}
else version (MIPS32)
{
    enum __NR_perf_event_open = 4333;
}
else version (MIPS64)
{
    version (MIPS_N32)
        enum __NR_perf_event_open = 6296;
    else version (MIPS_N64)
        enum __NR_perf_event_open = 5292;
    else
        static assert(0, "Architecture not supported");
}
else version (PPC_Any)
{
    enum __NR_perf_event_open = 319;
}
else version (RISCV_Any)
{
    enum __NR_perf_event_open = 241;
}
else version (SPARC_Any)
{
    enum __NR_perf_event_open = 327;
}
else
{
    static assert(0, "Architecture not supported");
}

extern (C) extern long syscall(long __sysno, ...);

static long perf_event_open(perf_event_attr* hw_event, pid_t pid, int cpu, int group_fd, ulong flags)
{
    return syscall(__NR_perf_event_open, hw_event, pid, cpu, group_fd, flags);
}

/*
 * User-space ABI bits:
 */

/**
 * attr.type
 */
enum perf_type_id
{
    PERF_TYPE_HARDWARE = 0,
    PERF_TYPE_SOFTWARE = 1,
    PERF_TYPE_TRACEPOINT = 2,
    PERF_TYPE_HW_CACHE = 3,
    PERF_TYPE_RAW = 4,
    PERF_TYPE_BREAKPOINT = 5,

    PERF_TYPE_MAX = 6 /* non-ABI */
}

/**
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id
{
    ///
    PERF_COUNT_HW_CPU_CYCLES = 0,
    ///
    PERF_COUNT_HW_INSTRUCTIONS = 1,
    ///
    PERF_COUNT_HW_CACHE_REFERENCES = 2,
    ///
    PERF_COUNT_HW_CACHE_MISSES = 3,
    ///
    PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
    ///
    PERF_COUNT_HW_BRANCH_MISSES = 5,
    ///
    PERF_COUNT_HW_BUS_CYCLES = 6,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
    ///
    PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
    ///
    PERF_COUNT_HW_REF_CPU_CYCLES = 9,
    ///
    PERF_COUNT_HW_MAX = 10 /* non-ABI */
}

/**
 * Generalized hardware cache events:
 *
 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 * { read, write, prefetch } x
 * { accesses, misses }
 */
enum perf_hw_cache_id
{
    ///
    PERF_COUNT_HW_CACHE_L1D = 0,
    ///
    PERF_COUNT_HW_CACHE_L1I = 1,
    ///
    PERF_COUNT_HW_CACHE_LL = 2,
    ///
    PERF_COUNT_HW_CACHE_DTLB = 3,
    ///
    PERF_COUNT_HW_CACHE_ITLB = 4,
    ///
    PERF_COUNT_HW_CACHE_BPU = 5,
    ///
    PERF_COUNT_HW_CACHE_NODE = 6,
    ///
    PERF_COUNT_HW_CACHE_MAX = 7 /* non-ABI */
}

///
enum perf_hw_cache_op_id
{
    ///
    PERF_COUNT_HW_CACHE_OP_READ = 0,
    ///
    PERF_COUNT_HW_CACHE_OP_WRITE = 1,
    ///
    PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
    ///
    PERF_COUNT_HW_CACHE_OP_MAX = 3 /* non-ABI */
}

///
enum perf_hw_cache_op_result_id
{
    ///
    PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
    ///
    PERF_COUNT_HW_CACHE_RESULT_MAX = 2 /* non-ABI */
}

/**
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids
{
    ///
    PERF_COUNT_SW_CPU_CLOCK = 0,
    ///
    PERF_COUNT_SW_TASK_CLOCK = 1,
    ///
    PERF_COUNT_SW_PAGE_FAULTS = 2,
    ///
    PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
    ///
    PERF_COUNT_SW_CPU_MIGRATIONS = 4,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
    ///
    PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
    ///
    PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
    ///
    PERF_COUNT_SW_EMULATION_FAULTS = 8,
    ///
    PERF_COUNT_SW_DUMMY = 9,
    ///
    PERF_COUNT_SW_BPF_OUTPUT = 10,
    ///
    PERF_COUNT_SW_MAX = 11 /* non-ABI */
}

/**
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format
{
    ///
    PERF_SAMPLE_IP = 1U << 0,
    ///
    PERF_SAMPLE_TID = 1U << 1,
    ///
    PERF_SAMPLE_TIME = 1U << 2,
    ///
    PERF_SAMPLE_ADDR = 1U << 3,
    ///
    PERF_SAMPLE_READ = 1U << 4,
    ///
    PERF_SAMPLE_CALLCHAIN = 1U << 5,
    ///
    PERF_SAMPLE_ID = 1U << 6,
    ///
    PERF_SAMPLE_CPU = 1U << 7,
    ///
    PERF_SAMPLE_PERIOD = 1U << 8,
    ///
    PERF_SAMPLE_STREAM_ID = 1U << 9,
    ///
    PERF_SAMPLE_RAW = 1U << 10,
    ///
    PERF_SAMPLE_BRANCH_STACK = 1U << 11,
    ///
    PERF_SAMPLE_REGS_USER = 1U << 12,
    ///
    PERF_SAMPLE_STACK_USER = 1U << 13,
    ///
    PERF_SAMPLE_WEIGHT = 1U << 14,
    ///
    PERF_SAMPLE_DATA_SRC = 1U << 15,
    ///
    PERF_SAMPLE_IDENTIFIER = 1U << 16,
    ///
    PERF_SAMPLE_TRANSACTION = 1U << 17,
    ///
    PERF_SAMPLE_REGS_INTR = 1U << 18,
    ///
    PERF_SAMPLE_PHYS_ADDR = 1U << 19,
    ///
    PERF_SAMPLE_MAX = 1U << 20 /* non-ABI */
}

/**
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift
{
    PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /** user branches */
    PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /** kernel branches */
    PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /** hypervisor branches */

    PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /** any branch types */
    PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /** any call branch */
    PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /** any return branch */
    PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /** indirect calls */
    PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /** transaction aborts */
    PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /** in transaction */
    PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /** not in transaction */
    PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /** conditional branches */

    PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /** call/ret stack */
    PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /** indirect jumps */
    PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /** direct call */

    PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /** no flags */
    PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /** no cycles */

    PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /** save branch type */

    PERF_SAMPLE_BRANCH_MAX_SHIFT = 17 /** non-ABI */
}

///
enum perf_branch_sample_type
{
    PERF_SAMPLE_BRANCH_USER = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_USER_SHIFT,
    PERF_SAMPLE_BRANCH_KERNEL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
    PERF_SAMPLE_BRANCH_HV = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_HV_SHIFT,
    PERF_SAMPLE_BRANCH_ANY = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
    PERF_SAMPLE_BRANCH_IND_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_ABORT_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
    PERF_SAMPLE_BRANCH_IN_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
    PERF_SAMPLE_BRANCH_NO_TX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
    PERF_SAMPLE_BRANCH_COND = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_COND_SHIFT,
    PERF_SAMPLE_BRANCH_CALL_STACK = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
    PERF_SAMPLE_BRANCH_IND_JUMP = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
    PERF_SAMPLE_BRANCH_CALL = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_CALL_SHIFT,
    PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
    PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
    PERF_SAMPLE_BRANCH_TYPE_SAVE = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
    PERF_SAMPLE_BRANCH_MAX = 1U << perf_branch_sample_type_shift.PERF_SAMPLE_BRANCH_MAX_SHIFT
}

/**
 * Common flow change classification
 */
enum
{
    PERF_BR_UNKNOWN = 0, /** unknown */
    PERF_BR_COND = 1, /** conditional */
    PERF_BR_UNCOND = 2, /** unconditional */
    PERF_BR_IND = 3, /** indirect */
    PERF_BR_CALL = 4, /** function call */
    PERF_BR_IND_CALL = 5, /** indirect function call */
    PERF_BR_RET = 6, /** function return */
    PERF_BR_SYSCALL = 7, /** syscall */
    PERF_BR_SYSRET = 8, /** syscall return */
    PERF_BR_COND_CALL = 9, /** conditional function call */
    PERF_BR_COND_RET = 10, /** conditional function return */
    PERF_BR_MAX = 11
}

///
enum PERF_SAMPLE_BRANCH_PLM_ALL = perf_branch_sample_type.PERF_SAMPLE_BRANCH_USER
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_KERNEL
    | perf_branch_sample_type.PERF_SAMPLE_BRANCH_HV;

/**
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi
{
    ///
    PERF_SAMPLE_REGS_ABI_NONE = 0,
    ///
    PERF_SAMPLE_REGS_ABI_32 = 1,
    ///
    PERF_SAMPLE_REGS_ABI_64 = 2
}

/**
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum
{
    PERF_TXN_ELISION = 1 << 0, /** From elision */
    PERF_TXN_TRANSACTION = 1 << 1, /** From transaction */
    PERF_TXN_SYNC = 1 << 2, /** Instruction is related */
    PERF_TXN_ASYNC = 1 << 3, /** Instruction not related */
    PERF_TXN_RETRY = 1 << 4, /** Retry possible */
    PERF_TXN_CONFLICT = 1 << 5, /** Conflict abort */
    PERF_TXN_CAPACITY_WRITE = 1 << 6, /** Capacity write abort */
    PERF_TXN_CAPACITY_READ = 1 << 7, /** Capacity read abort */

    PERF_TXN_MAX = 1 << 8, /** non-ABI */

    /** bits 32..63 are reserved for the abort code */

    ///PERF_TXN_ABORT_MASK = 0xffffffff << 32,
    PERF_TXN_ABORT_SHIFT = 32
}

/**
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 * ---
 * struct read_format {
 *     { u64 value;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 id;           } && PERF_FORMAT_ID
 *     } && !PERF_FORMAT_GROUP
 *
 *     { u64 nr;
 *       { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *       { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *       { u64 value;
 *         { u64 id;         } && PERF_FORMAT_ID
 *       } cntr[nr];
 *     } && PERF_FORMAT_GROUP
 * };
 * ---
 */
enum perf_event_read_format
{
    ///
    PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
    ///
    PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
    ///
    PERF_FORMAT_ID = 1U << 2,
    ///
    PERF_FORMAT_GROUP = 1U << 3,
    PERF_FORMAT_MAX = 1U << 4 /** non-ABI */
}

enum PERF_ATTR_SIZE_VER0 = 64; /** sizeof first published struct */
enum PERF_ATTR_SIZE_VER1 = 72; /** add: config2 */
enum PERF_ATTR_SIZE_VER2 = 80; /** add: branch_sample_type */
enum PERF_ATTR_SIZE_VER3 = 96; /** add: sample_regs_user */
                                /* add: sample_stack_user */
enum PERF_ATTR_SIZE_VER4 = 104; /** add: sample_regs_intr */
enum PERF_ATTR_SIZE_VER5 = 112; /** add: aux_watermark */

/**
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 * should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr
{
    /**
     * Major type: hardware/software/tracepoint/etc.
     */
    uint type;

    /**
     * Size of the attr structure, for fwd/bwd compat.
     */
    uint size;

    /**
     * Type specific configuration information.
     */
    ulong config;
    ///
    union
    {
        ///
        ulong sample_period;
        ///
        ulong sample_freq;
    }
    ///
    ulong sample_type;
    ///
    ulong read_format;

    // mixin(bitfields!(
    //     ulong, "disabled", 1,
    //     ulong, "inherit", 1,
    //     ulong, "pinned", 1,
    //     ulong, "exclusive", 1,
    //     ulong, "exclude_user", 1,
    //     ulong, "exclude_kernel", 1,
    //     ulong, "exclude_hv", 1,
    //     ulong, "exclude_idle", 1,
    //     ulong, "mmap", 1,
    //     ulong, "comm", 1,
    //     ulong, "freq", 1,
    //     ulong, "inherit_stat", 1,
    //     ulong, "enable_on_exec", 1,
    //     ulong, "task", 1,
    //     ulong, "watermark", 1,
    //     ulong, "precise_ip", 2,
    //     ulong, "mmap_data", 1,
    //     ulong, "sample_id_all", 1,
    //     ulong, "exclude_host", 1,
    //     ulong, "exclude_guest", 1,
    //     ulong, "exclude_callchain_kernel", 1,
    //     ulong, "exclude_callchain_user", 1,
    //     ulong, "mmap2", 1,
    //     ulong, "comm_exec", 1,
    //     ulong, "use_clockid", 1,
    //     ulong, "context_switch", 1,
    //     ulong, "write_backward", 1,
    //     ulong, "namespaces", 1,
    //     ulong, "__reserved_1", 35));
    private ulong perf_event_attr_bitmanip;
    ///
    @property ulong disabled() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 1U) >> 0U;
        return cast(ulong) result;
    }
    ///
    @property void disabled(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= disabled_min,
            "Value is smaller than the minimum value of bitfield 'disabled'");
        assert(v <= disabled_max,
            "Value is greater than the maximum value of bitfield 'disabled'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 0U) & 1U));
    }

    enum ulong disabled_min = cast(ulong) 0U;
    enum ulong disabled_max = cast(ulong) 1U;
    ///
    @property ulong inherit() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 2U) >> 1U;
        return cast(ulong) result;
    }
    ///
    @property void inherit(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= inherit_min,
            "Value is smaller than the minimum value of bitfield 'inherit'");
        assert(v <= inherit_max,
            "Value is greater than the maximum value of bitfield 'inherit'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 1U) & 2U));
    }

    enum ulong inherit_min = cast(ulong) 0U;
    enum ulong inherit_max = cast(ulong) 1U;
    ///
    @property ulong pinned() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 4U) >> 2U;
        return cast(ulong) result;
    }
    ///
    @property void pinned(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= pinned_min,
            "Value is smaller than the minimum value of bitfield 'pinned'");
        assert(v <= pinned_max,
            "Value is greater than the maximum value of bitfield 'pinned'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 2U) & 4U));
    }

    enum ulong pinned_min = cast(ulong) 0U;
    enum ulong pinned_max = cast(ulong) 1U;
    ///
    @property ulong exclusive() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 8U) >> 3U;
        return cast(ulong) result;
    }
    ///
    @property void exclusive(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclusive_min,
            "Value is smaller than the minimum value of bitfield 'exclusive'");
        assert(v <= exclusive_max,
            "Value is greater than the maximum value of bitfield 'exclusive'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 3U) & 8U));
    }

    enum ulong exclusive_min = cast(ulong) 0U;
    enum ulong exclusive_max = cast(ulong) 1U;
    ///
    @property ulong exclude_user() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 16U) >> 4U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_user(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_user_min,
            "Value is smaller than the minimum value of bitfield 'exclude_user'");
        assert(v <= exclude_user_max,
            "Value is greater than the maximum value of bitfield 'exclude_user'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 4U) & 16U));
    }

    enum ulong exclude_user_min = cast(ulong) 0U;
    enum ulong exclude_user_max = cast(ulong) 1U;
    ///
    @property ulong exclude_kernel() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 32U) >> 5U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_kernel(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_kernel_min,
            "Value is smaller than the minimum value of bitfield 'exclude_kernel'");
        assert(v <= exclude_kernel_max,
            "Value is greater than the maximum value of bitfield 'exclude_kernel'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 32U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 5U) & 32U));
    }

    enum ulong exclude_kernel_min = cast(ulong) 0U;
    enum ulong exclude_kernel_max = cast(ulong) 1U;
    ///
    @property ulong exclude_hv() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 64U) >> 6U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_hv(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_hv_min,
            "Value is smaller than the minimum value of bitfield 'exclude_hv'");
        assert(v <= exclude_hv_max,
            "Value is greater than the maximum value of bitfield 'exclude_hv'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 64U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 6U) & 64U));
    }

    enum ulong exclude_hv_min = cast(ulong) 0U;
    enum ulong exclude_hv_max = cast(ulong) 1U;
    ///
    @property ulong exclude_idle() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 128U) >> 7U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_idle(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_idle_min,
            "Value is smaller than the minimum value of bitfield 'exclude_idle'");
        assert(v <= exclude_idle_max,
            "Value is greater than the maximum value of bitfield 'exclude_idle'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 128U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 7U) & 128U));
    }

    enum ulong exclude_idle_min = cast(ulong) 0U;
    enum ulong exclude_idle_max = cast(ulong) 1U;
    ///
    @property ulong mmap() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 256U) >> 8U;
        return cast(ulong) result;
    }
    ///
    @property void mmap(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= mmap_min, "Value is smaller than the minimum value of bitfield 'mmap'");
        assert(v <= mmap_max, "Value is greater than the maximum value of bitfield 'mmap'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 256U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 8U) & 256U));
    }

    enum ulong mmap_min = cast(ulong) 0U;
    enum ulong mmap_max = cast(ulong) 1U;
    ///
    @property ulong comm() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 512U) >> 9U;
        return cast(ulong) result;
    }
    ///
    @property void comm(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= comm_min, "Value is smaller than the minimum value of bitfield 'comm'");
        assert(v <= comm_max, "Value is greater than the maximum value of bitfield 'comm'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 512U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 9U) & 512U));
    }

    enum ulong comm_min = cast(ulong) 0U;
    enum ulong comm_max = cast(ulong) 1U;
    ///
    @property ulong freq() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 1024U) >> 10U;
        return cast(ulong) result;
    }
    ///
    @property void freq(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= freq_min, "Value is smaller than the minimum value of bitfield 'freq'");
        assert(v <= freq_max, "Value is greater than the maximum value of bitfield 'freq'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1024U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 10U) & 1024U));
    }

    enum ulong freq_min = cast(ulong) 0U;
    enum ulong freq_max = cast(ulong) 1U;
    ///
    @property ulong inherit_stat() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 2048U) >> 11U;
        return cast(ulong) result;
    }
    ///
    @property void inherit_stat(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= inherit_stat_min,
            "Value is smaller than the minimum value of bitfield 'inherit_stat'");
        assert(v <= inherit_stat_max,
            "Value is greater than the maximum value of bitfield 'inherit_stat'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2048U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 11U) & 2048U));
    }

    enum ulong inherit_stat_min = cast(ulong) 0U;
    enum ulong inherit_stat_max = cast(ulong) 1U;
    ///
    @property ulong enable_on_exec() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 4096U) >> 12U;
        return cast(ulong) result;
    }
    ///
    @property void enable_on_exec(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= enable_on_exec_min,
            "Value is smaller than the minimum value of bitfield 'enable_on_exec'");
        assert(v <= enable_on_exec_max,
            "Value is greater than the maximum value of bitfield 'enable_on_exec'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4096U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 12U) & 4096U));
    }

    enum ulong enable_on_exec_min = cast(ulong) 0U;
    enum ulong enable_on_exec_max = cast(ulong) 1U;
    ///
    @property ulong task() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 8192U) >> 13U;
        return cast(ulong) result;
    }
    ///
    @property void task(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= task_min, "Value is smaller than the minimum value of bitfield 'task'");
        assert(v <= task_max, "Value is greater than the maximum value of bitfield 'task'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8192U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 13U) & 8192U));
    }

    enum ulong task_min = cast(ulong) 0U;
    enum ulong task_max = cast(ulong) 1U;
    ///
    @property ulong watermark() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 16384U) >> 14U;
        return cast(ulong) result;
    }
    ///
    @property void watermark(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= watermark_min,
            "Value is smaller than the minimum value of bitfield 'watermark'");
        assert(v <= watermark_max,
            "Value is greater than the maximum value of bitfield 'watermark'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16384U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 14U) & 16384U));
    }

    enum ulong watermark_min = cast(ulong) 0U;
    enum ulong watermark_max = cast(ulong) 1U;
    ///
    @property ulong precise_ip() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 98304U) >> 15U;
        return cast(ulong) result;
    }
    ///
    @property void precise_ip(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= precise_ip_min,
            "Value is smaller than the minimum value of bitfield 'precise_ip'");
        assert(v <= precise_ip_max,
            "Value is greater than the maximum value of bitfield 'precise_ip'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 98304U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 15U) & 98304U));
    }

    enum ulong precise_ip_min = cast(ulong) 0U;
    enum ulong precise_ip_max = cast(ulong) 3U;
    ///
    @property ulong mmap_data() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 131072U) >> 17U;
        return cast(ulong) result;
    }
    ///
    @property void mmap_data(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= mmap_data_min,
            "Value is smaller than the minimum value of bitfield 'mmap_data'");
        assert(v <= mmap_data_max,
            "Value is greater than the maximum value of bitfield 'mmap_data'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 131072U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 17U) & 131072U));
    }

    enum ulong mmap_data_min = cast(ulong) 0U;
    enum ulong mmap_data_max = cast(ulong) 1U;
    ///
    @property ulong sample_id_all() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 262144U) >> 18U;
        return cast(ulong) result;
    }
    ///
    @property void sample_id_all(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= sample_id_all_min,
            "Value is smaller than the minimum value of bitfield 'sample_id_all'");
        assert(v <= sample_id_all_max,
            "Value is greater than the maximum value of bitfield 'sample_id_all'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 262144U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 18U) & 262144U));
    }

    enum ulong sample_id_all_min = cast(ulong) 0U;
    enum ulong sample_id_all_max = cast(ulong) 1U;
    ///
    @property ulong exclude_host() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 524288U) >> 19U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_host(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_host_min,
            "Value is smaller than the minimum value of bitfield 'exclude_host'");
        assert(v <= exclude_host_max,
            "Value is greater than the maximum value of bitfield 'exclude_host'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 524288U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 19U) & 524288U));
    }

    enum ulong exclude_host_min = cast(ulong) 0U;
    enum ulong exclude_host_max = cast(ulong) 1U;
    ///
    @property ulong exclude_guest() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 1048576U) >> 20U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_guest(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_guest_min,
            "Value is smaller than the minimum value of bitfield 'exclude_guest'");
        assert(v <= exclude_guest_max,
            "Value is greater than the maximum value of bitfield 'exclude_guest'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 1048576U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 20U) & 1048576U));
    }

    enum ulong exclude_guest_min = cast(ulong) 0U;
    enum ulong exclude_guest_max = cast(ulong) 1U;
    ///
    @property ulong exclude_callchain_kernel() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 2097152U) >> 21U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_callchain_kernel(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_callchain_kernel_min,
            "Value is smaller than the minimum value of bitfield 'exclude_callchain_kernel'");
        assert(v <= exclude_callchain_kernel_max,
            "Value is greater than the maximum value of bitfield 'exclude_callchain_kernel'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 2097152U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 21U) & 2097152U));
    }

    enum ulong exclude_callchain_kernel_min = cast(ulong) 0U;
    enum ulong exclude_callchain_kernel_max = cast(ulong) 1U;
    ///
    @property ulong exclude_callchain_user() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 4194304U) >> 22U;
        return cast(ulong) result;
    }
    ///
    @property void exclude_callchain_user(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= exclude_callchain_user_min,
            "Value is smaller than the minimum value of bitfield 'exclude_callchain_user'");
        assert(v <= exclude_callchain_user_max,
            "Value is greater than the maximum value of bitfield 'exclude_callchain_user'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 4194304U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 22U) & 4194304U));
    }

    enum ulong exclude_callchain_user_min = cast(ulong) 0U;
    enum ulong exclude_callchain_user_max = cast(ulong) 1U;
    ///
    @property ulong mmap2() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 8388608U) >> 23U;
        return cast(ulong) result;
    }
    ///
    @property void mmap2(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= mmap2_min,
            "Value is smaller than the minimum value of bitfield 'mmap2'");
        assert(v <= mmap2_max,
            "Value is greater than the maximum value of bitfield 'mmap2'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 8388608U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 23U) & 8388608U));
    }

    enum ulong mmap2_min = cast(ulong) 0U;
    enum ulong mmap2_max = cast(ulong) 1U;
    ///
    @property ulong comm_exec() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 16777216U) >> 24U;
        return cast(ulong) result;
    }
    ///
    @property void comm_exec(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= comm_exec_min,
            "Value is smaller than the minimum value of bitfield 'comm_exec'");
        assert(v <= comm_exec_max,
            "Value is greater than the maximum value of bitfield 'comm_exec'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 16777216U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 24U) & 16777216U));
    }

    enum ulong comm_exec_min = cast(ulong) 0U;
    enum ulong comm_exec_max = cast(ulong) 1U;
    ///
    @property ulong use_clockid() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 33554432U) >> 25U;
        return cast(ulong) result;
    }
    ///
    @property void use_clockid(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= use_clockid_min,
            "Value is smaller than the minimum value of bitfield 'use_clockid'");
        assert(v <= use_clockid_max,
            "Value is greater than the maximum value of bitfield 'use_clockid'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 33554432U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 25U) & 33554432U));
    }

    enum ulong use_clockid_min = cast(ulong) 0U;
    enum ulong use_clockid_max = cast(ulong) 1U;
    ///
    @property ulong context_switch() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 67108864U) >> 26U;
        return cast(ulong) result;
    }
    ///
    @property void context_switch(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= context_switch_min,
            "Value is smaller than the minimum value of bitfield 'context_switch'");
        assert(v <= context_switch_max,
            "Value is greater than the maximum value of bitfield 'context_switch'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 67108864U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 26U) & 67108864U));
    }

    enum ulong context_switch_min = cast(ulong) 0U;
    enum ulong context_switch_max = cast(ulong) 1U;
    ///
    @property ulong write_backward() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 134217728U) >> 27U;
        return cast(ulong) result;
    }
    ///
    @property void write_backward(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= write_backward_min,
            "Value is smaller than the minimum value of bitfield 'write_backward'");
        assert(v <= write_backward_max,
            "Value is greater than the maximum value of bitfield 'write_backward'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 134217728U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 27U) & 134217728U));
    }

    enum ulong write_backward_min = cast(ulong) 0U;
    enum ulong write_backward_max = cast(ulong) 1U;
    ///
    @property ulong namespaces() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 268435456U) >> 28U;
        return cast(ulong) result;
    }
    ///
    @property void namespaces(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= namespaces_min,
            "Value is smaller than the minimum value of bitfield 'namespaces'");
        assert(v <= namespaces_max,
            "Value is greater than the maximum value of bitfield 'namespaces'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(typeof(perf_event_attr_bitmanip)) 268435456U)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 28U) & 268435456U));
    }

    enum ulong namespaces_min = cast(ulong) 0U;
    enum ulong namespaces_max = cast(ulong) 1U;
    ///
    @property ulong __reserved_1() @safe pure nothrow @nogc const
    {
        auto result = (perf_event_attr_bitmanip & 18446744073172680704UL) >> 29U;
        return cast(ulong) result;
    }
    ///
    @property void __reserved_1(ulong v) @safe pure nothrow @nogc
    {
        assert(v >= __reserved_1_min,
            "Value is smaller than the minimum value of bitfield '__reserved_1'");
        assert(v <= __reserved_1_max,
            "Value is greater than the maximum value of bitfield '__reserved_1'");
        perf_event_attr_bitmanip = cast(typeof(perf_event_attr_bitmanip))(
            (perf_event_attr_bitmanip & (-1 - cast(
            typeof(perf_event_attr_bitmanip)) 18446744073172680704UL)) | (
            (cast(typeof(perf_event_attr_bitmanip)) v << 29U) & 18446744073172680704UL));
    }

    enum ulong __reserved_1_min = cast(ulong) 0U;
    enum ulong __reserved_1_max = cast(ulong) 34359738367UL;
    ///
    union
    {
        uint wakeup_events; /** wakeup every n events */
        uint wakeup_watermark; /** bytes before wakeup */
    }
    ///
    uint bp_type;

    union
    {
        ///
        ulong bp_addr;
        ulong config1; /** extension of config */
    }

    union
    {
        ///
        ulong bp_len;
        ulong config2; /** extension of config1 */
    }

    ulong branch_sample_type; /** enum perf_branch_sample_type */

    /**
     * Defines set of user regs to dump on samples.
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_user;

    /**
     * Defines size of the user stack to dump on samples.
     */
    uint sample_stack_user;
    ///
    int clockid;

    /**
     * Defines set of regs to dump for each sample
     * state captured on:
     * - precise = 0: PMU interrupt
     * - precise > 0: sampled instruction
     *
     * See asm/perf_regs.h for details.
     */
    ulong sample_regs_intr;

    /**
     * Wakeup watermark for AUX area
     */
    uint aux_watermark;
    ///
    ushort sample_max_stack;
    /** align to __u64 */
    ushort __reserved_2;
}

///
extern (D) auto perf_flags(T)(auto ref T attr)
{
    return *(&attr.read_format + 1);
}

/**
 * Ioctls that can be done on a perf event fd:
 */
enum PERF_EVENT_IOC_ENABLE = _IO('$', 0);
///
enum PERF_EVENT_IOC_DISABLE = _IO('$', 1);
///
enum PERF_EVENT_IOC_REFRESH = _IO('$', 2);
///
enum PERF_EVENT_IOC_RESET = _IO('$', 3);
///
enum PERF_EVENT_IOC_PERIOD = _IOW!ulong('$', 4);
///
enum PERF_EVENT_IOC_SET_OUTPUT = _IO('$', 5);
///
enum PERF_EVENT_IOC_SET_FILTER = _IOW!(char*)('$', 6);
///
enum PERF_EVENT_IOC_ID = _IOR!(ulong*)('$', 7);
///
enum PERF_EVENT_IOC_SET_BPF = _IOW!uint('$', 8);
///
enum PERF_EVENT_IOC_PAUSE_OUTPUT = _IOW!uint('$', 9);

///
enum perf_event_ioc_flags
{
    PERF_IOC_FLAG_GROUP = 1U << 0
}

/**
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page
{
    uint version_; /** version number of this structure */
    uint compat_version; /** lowest version this is compat with */

    /**
     * Bits needed to read the hw events in user-space.
     * ---
     * u32 seq, time_mult, time_shift, index, width;
     * u64 count, enabled, running;
     * u64 cyc, time_offset;
     * s64 pmc = 0;
     *
     * do {
     *     seq = pc->lock;
     *     barrier()
     *
     *     enabled = pc->time_enabled;
     *     running = pc->time_running;
     *
     *     if (pc->cap_usr_time && enabled != running) {
     *         cyc = rdtsc();
     *         time_offset = pc->time_offset;
     *         time_mult = pc->time_mult;
     *         time_shift = pc->time_shift;
     *     }
     *
     *     index = pc->index;
     *     count = pc->offset;
     *     if (pc->cap_user_rdpmc && index) {
     *         width = pc->pmc_width;
     *         pmc = rdpmc(index - 1);
     *     }
     *
     *     barrier();
     * } while (pc->lock != seq);
     * ---
     * NOTE: for obvious reason this only works on self-monitoring
     * processes.
     */
    uint lock; /** seqlock for synchronization */
    uint index; /** hardware event identifier */
    long offset; /** add to hardware event value */
    ulong time_enabled; /** time event active */
    ulong time_running; /** time event on cpu */
    ///
    union
    {
        ///
        ulong capabilities;

        struct
        {
            /* mixin(bitfields!(ulong, "cap_bit0", 1, ulong, "cap_bit0_is_deprecated", 1, ulong,
                "cap_user_rdpmc", 1, ulong, "cap_user_time", 1, ulong,
                "cap_user_time_zero", 1, ulong, "cap_____res", 59)); */

            private ulong mmap_page_bitmanip;
            ///
            @property ulong cap_bit0() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 1U) >> 0U;
                return cast(ulong) result;
            }
            ///
            @property void cap_bit0(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_bit0_min,
                    "Value is smaller than the minimum value of bitfield 'cap_bit0'");
                assert(v <= cap_bit0_max,
                    "Value is greater than the maximum value of bitfield 'cap_bit0'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
                    (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 1U)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 0U) & 1U));
            }

            enum ulong cap_bit0_min = cast(ulong) 0U;
            enum ulong cap_bit0_max = cast(ulong) 1U;
            ///
            @property ulong cap_bit0_is_deprecated() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 2U) >> 1U;
                return cast(ulong) result;
            }
            ///
            @property void cap_bit0_is_deprecated(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_bit0_is_deprecated_min,
                    "Value is smaller than the minimum value of bitfield 'cap_bit0_is_deprecated'");
                assert(v <= cap_bit0_is_deprecated_max,
                    "Value is greater than the maximum value of bitfield 'cap_bit0_is_deprecated'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
                    (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 2U)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 1U) & 2U));
            }

            enum ulong cap_bit0_is_deprecated_min = cast(ulong) 0U;
            enum ulong cap_bit0_is_deprecated_max = cast(ulong) 1U;
            ///
            @property ulong cap_user_rdpmc() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 4U) >> 2U;
                return cast(ulong) result;
            }
            ///
            @property void cap_user_rdpmc(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_user_rdpmc_min,
                    "Value is smaller than the minimum value of bitfield 'cap_user_rdpmc'");
                assert(v <= cap_user_rdpmc_max,
                    "Value is greater than the maximum value of bitfield 'cap_user_rdpmc'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
                    (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 4U)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 2U) & 4U));
            }

            enum ulong cap_user_rdpmc_min = cast(ulong) 0U;
            enum ulong cap_user_rdpmc_max = cast(ulong) 1U;
            ///
            @property ulong cap_user_time() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 8U) >> 3U;
                return cast(ulong) result;
            }
            ///
            @property void cap_user_time(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_user_time_min,
                    "Value is smaller than the minimum value of bitfield 'cap_user_time'");
                assert(v <= cap_user_time_max,
                    "Value is greater than the maximum value of bitfield 'cap_user_time'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
                    (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 8U)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 3U) & 8U));
            }

            enum ulong cap_user_time_min = cast(ulong) 0U;
            enum ulong cap_user_time_max = cast(ulong) 1U;
            ///
            @property ulong cap_user_time_zero() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 16U) >> 4U;
                return cast(ulong) result;
            }
            ///
            @property void cap_user_time_zero(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_user_time_zero_min,
                    "Value is smaller than the minimum value of bitfield 'cap_user_time_zero'");
                assert(v <= cap_user_time_zero_max,
                    "Value is greater than the maximum value of bitfield 'cap_user_time_zero'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))(
                    (mmap_page_bitmanip & (-1 - cast(typeof(mmap_page_bitmanip)) 16U)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 4U) & 16U));
            }

            enum ulong cap_user_time_zero_min = cast(ulong) 0U;
            enum ulong cap_user_time_zero_max = cast(ulong) 1U;
            ///
            @property ulong cap_____res() @safe pure nothrow @nogc const
            {
                auto result = (mmap_page_bitmanip & 18446744073709551584UL) >> 5U;
                return cast(ulong) result;
            }
            ///
            @property void cap_____res(ulong v) @safe pure nothrow @nogc
            {
                assert(v >= cap_____res_min,
                    "Value is smaller than the minimum value of bitfield 'cap_____res'");
                assert(v <= cap_____res_max,
                    "Value is greater than the maximum value of bitfield 'cap_____res'");
                mmap_page_bitmanip = cast(typeof(mmap_page_bitmanip))((mmap_page_bitmanip & (
                    -1 - cast(typeof(mmap_page_bitmanip)) 18446744073709551584UL)) | (
                    (cast(typeof(mmap_page_bitmanip)) v << 5U) & 18446744073709551584UL));
            }

            enum ulong cap_____res_min = cast(ulong) 0U;
            enum ulong cap_____res_max = cast(ulong) 576460752303423487UL;
        }
    }

    /**
     * If cap_user_rdpmc this field provides the bit-width of the value
     * read using the rdpmc() or equivalent instruction. This can be used
     * to sign extend the result like:
     *
     * pmc <<= 64 - width;
     * pmc >>= 64 - width; // signed shift right
     * count += pmc;
     */
    ushort pmc_width;

    /**
     * If cap_usr_time the below fields can be used to compute the time
     * delta since time_enabled (in ns) using rdtsc or similar.
     *
     * u64 quot, rem;
     * u64 delta;
     *
     * quot = (cyc >> time_shift);
     * rem = cyc & (((u64)1 << time_shift) - 1);
     * delta = time_offset + quot * time_mult +
     *         ((rem * time_mult) >> time_shift);
     *
     * Where time_offset,time_mult,time_shift and cyc are read in the
     * seqcount loop described above. This delta can then be added to
     * enabled and possible running (if index), improving the scaling:
     *
     * enabled += delta;
     * if (index)
     *     running += delta;
     *
     * quot = count / running;
     * rem = count % running;
     * count = quot * enabled + (rem * enabled) / running;
     */
    ushort time_shift;
    ///
    uint time_mult;
    ///
    ulong time_offset;
    /**
     * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
     * from sample timestamps.
     *
     * time = timestamp - time_zero;
     * quot = time / time_mult;
     * rem = time % time_mult;
     * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
     *
     * And vice versa:
     *
     * quot = cyc >> time_shift;
     * rem = cyc & (((u64)1 << time_shift) - 1);
     * timestamp = time_zero + quot * time_mult +
     *             ((rem * time_mult) >> time_shift);
     */
    ulong time_zero;
    uint size; /** Header size up to __reserved[] fields. */

    /**
     * Hole for extension of the self monitor capabilities
     */

    ubyte[948] __reserved; /** align to 1k. */

    /**
     * Control data for the mmap() data buffer.
     *
     * User-space reading the @data_head value should issue an smp_rmb(),
     * after reading this value.
     *
     * When the mapping is PROT_WRITE the @data_tail value should be
     * written by userspace to reflect the last read data, after issuing
     * an smp_mb() to separate the data read from the ->data_tail store.
     * In this case the kernel will not over-write unread data.
     *
     * See perf_output_put_handle() for the data ordering.
     *
     * data_{offset,size} indicate the location and size of the perf record
     * buffer within the mmapped area.
     */
    ulong data_head; /** head in the data section */
    ulong data_tail; /** user-space written tail */
    ulong data_offset; /** where the buffer starts */
    ulong data_size; /** data buffer size */

    /**
     * AUX area is defined by aux_{offset,size} fields that should be set
     * by the userspace, so that
     * ---
     * aux_offset >= data_offset + data_size
     * ---
     * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
     *
     * Ring buffer pointers aux_{head,tail} have the same semantics as
     * data_{head,tail} and same ordering rules apply.
     */
    ulong aux_head;
    ///
    ulong aux_tail;
    ///
    ulong aux_offset;
    ///
    ulong aux_size;
}
///
enum PERF_RECORD_MISC_CPUMODE_MASK = 7 << 0;
///
enum PERF_RECORD_MISC_CPUMODE_UNKNOWN = 0 << 0;
///
enum PERF_RECORD_MISC_KERNEL = 1 << 0;
///
enum PERF_RECORD_MISC_USER = 2 << 0;
///
enum PERF_RECORD_MISC_HYPERVISOR = 3 << 0;
///
enum PERF_RECORD_MISC_GUEST_KERNEL = 4 << 0;
///
enum PERF_RECORD_MISC_GUEST_USER = 5 << 0;

/**
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
enum PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT = 1 << 12;
/**
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
enum PERF_RECORD_MISC_MMAP_DATA = 1 << 13;
///
enum PERF_RECORD_MISC_COMM_EXEC = 1 << 13;
///
enum PERF_RECORD_MISC_SWITCH_OUT = 1 << 13;
/**
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
1484 */ 1485 enum PERF_RECORD_MISC_EXACT_IP = 1 << 14; 1486 /** 1487 * Reserve the last bit to indicate some extended misc field 1488 */ 1489 enum PERF_RECORD_MISC_EXT_RESERVED = 1 << 15; 1490 /// 1491 struct perf_event_header 1492 { 1493 /// 1494 uint type; 1495 /// 1496 ushort misc; 1497 /// 1498 ushort size; 1499 } 1500 /// 1501 struct perf_ns_link_info 1502 { 1503 /// 1504 ulong dev; 1505 /// 1506 ulong ino; 1507 } 1508 1509 enum 1510 { 1511 /// 1512 NET_NS_INDEX = 0, 1513 /// 1514 UTS_NS_INDEX = 1, 1515 /// 1516 IPC_NS_INDEX = 2, 1517 /// 1518 PID_NS_INDEX = 3, 1519 /// 1520 USER_NS_INDEX = 4, 1521 /// 1522 MNT_NS_INDEX = 5, 1523 /// 1524 CGROUP_NS_INDEX = 6, 1525 NR_NAMESPACES = 7 /** number of available namespaces */ 1526 } 1527 /// 1528 enum perf_event_type 1529 { 1530 /** 1531 * If perf_event_attr.sample_id_all is set then all event types will 1532 * have the sample_type selected fields related to where/when 1533 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU, 1534 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed 1535 * just after the perf_event_header and the fields already present for 1536 * the existing fields, i.e. at the end of the payload. That way a newer 1537 * perf.data file will be supported by older perf tools, with these new 1538 * optional fields being ignored. 1539 * --- 1540 * struct sample_id { 1541 * { u32 pid, tid; } && PERF_SAMPLE_TID 1542 * { u64 time; } && PERF_SAMPLE_TIME 1543 * { u64 id; } && PERF_SAMPLE_ID 1544 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 1545 * { u32 cpu, res; } && PERF_SAMPLE_CPU 1546 * { u64 id; } && PERF_SAMPLE_IDENTIFIER 1547 * } && perf_event_attr::sample_id_all 1548 * --- 1549 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The 1550 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed 1551 * relative to header.size. 1552 */ 1553 1554 /* 1555 * The MMAP events record the PROT_EXEC mappings so that we can 1556 * correlate userspace IPs to code. 
They have the following structure: 1557 * --- 1558 * struct { 1559 * struct perf_event_header header; 1560 * 1561 * u32 pid, tid; 1562 * u64 addr; 1563 * u64 len; 1564 * u64 pgoff; 1565 * char filename[]; 1566 * struct sample_id sample_id; 1567 * }; 1568 * --- 1569 */ 1570 PERF_RECORD_MMAP = 1, 1571 1572 /** 1573 * --- 1574 * struct { 1575 * struct perf_event_header header; 1576 * u64 id; 1577 * u64 lost; 1578 * struct sample_id sample_id; 1579 * }; 1580 * --- 1581 */ 1582 PERF_RECORD_LOST = 2, 1583 1584 /** 1585 * --- 1586 * struct { 1587 * struct perf_event_header header; 1588 * 1589 * u32 pid, tid; 1590 * char comm[]; 1591 * struct sample_id sample_id; 1592 * }; 1593 * --- 1594 */ 1595 PERF_RECORD_COMM = 3, 1596 1597 /** 1598 * --- 1599 * struct { 1600 * struct perf_event_header header; 1601 * u32 pid, ppid; 1602 * u32 tid, ptid; 1603 * u64 time; 1604 * struct sample_id sample_id; 1605 * }; 1606 * --- 1607 */ 1608 PERF_RECORD_EXIT = 4, 1609 1610 /** 1611 * --- 1612 * struct { 1613 * struct perf_event_header header; 1614 * u64 time; 1615 * u64 id; 1616 * u64 stream_id; 1617 * struct sample_id sample_id; 1618 * }; 1619 * --- 1620 */ 1621 PERF_RECORD_THROTTLE = 5, 1622 PERF_RECORD_UNTHROTTLE = 6, 1623 /** 1624 * --- 1625 * struct { 1626 * struct perf_event_header header; 1627 * u32 pid, ppid; 1628 * u32 tid, ptid; 1629 * u64 time; 1630 * struct sample_id sample_id; 1631 * }; 1632 * --- 1633 */ 1634 PERF_RECORD_FORK = 7, 1635 /** 1636 * --- 1637 * struct { 1638 * struct perf_event_header header; 1639 * u32 pid, tid; 1640 * 1641 * struct read_format values; 1642 * struct sample_id sample_id; 1643 * }; 1644 * --- 1645 */ 1646 PERF_RECORD_READ = 8, 1647 /** 1648 * --- 1649 * struct { 1650 * struct perf_event_header header; 1651 * 1652 * # 1653 * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. 1654 * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position 1655 * # is fixed relative to header. 1656 * # 1657 * 1658 * { u64 id; } && PERF_SAMPLE_IDENTIFIER 1659 * { u64 ip; } && PERF_SAMPLE_IP 1660 * { u32 pid, tid; } && PERF_SAMPLE_TID 1661 * { u64 time; } && PERF_SAMPLE_TIME 1662 * { u64 addr; } && PERF_SAMPLE_ADDR 1663 * { u64 id; } && PERF_SAMPLE_ID 1664 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID 1665 * { u32 cpu, res; } && PERF_SAMPLE_CPU 1666 * { u64 period; } && PERF_SAMPLE_PERIOD 1667 * 1668 * { struct read_format values; } && PERF_SAMPLE_READ 1669 * 1670 * { u64 nr, 1671 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN 1672 * 1673 * # 1674 * # The RAW record below is opaque data wrt the ABI 1675 * # 1676 * # That is, the ABI doesn't make any promises wrt to 1677 * # the stability of its content, it may vary depending 1678 * # on event, hardware, kernel version and phase of 1679 * # the moon. 1680 * # 1681 * # In other words, PERF_SAMPLE_RAW contents are not an ABI. 
1682 * # 1683 * 1684 * { u32 size; 1685 * char data[size];}&& PERF_SAMPLE_RAW 1686 * 1687 * { u64 nr; 1688 * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK 1689 * 1690 * { u64 abi; # enum perf_sample_regs_abi 1691 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER 1692 * 1693 * { u64 size; 1694 * char data[size]; 1695 * u64 dyn_size; } && PERF_SAMPLE_STACK_USER 1696 * 1697 * { u64 weight; } && PERF_SAMPLE_WEIGHT 1698 * { u64 data_src; } && PERF_SAMPLE_DATA_SRC 1699 * { u64 transaction; } && PERF_SAMPLE_TRANSACTION 1700 * { u64 abi; # enum perf_sample_regs_abi 1701 * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR 1702 * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR 1703 * }; 1704 * --- 1705 */ 1706 PERF_RECORD_SAMPLE = 9, 1707 1708 /** 1709 * --- 1710 * The MMAP2 records are an augmented version of MMAP, they add 1711 * maj, min, ino numbers to be used to uniquely identify each mapping 1712 * 1713 * struct { 1714 * struct perf_event_header header; 1715 * 1716 * u32 pid, tid; 1717 * u64 addr; 1718 * u64 len; 1719 * u64 pgoff; 1720 * u32 maj; 1721 * u32 min; 1722 * u64 ino; 1723 * u64 ino_generation; 1724 * u32 prot, flags; 1725 * char filename[]; 1726 * struct sample_id sample_id; 1727 * }; 1728 * --- 1729 */ 1730 PERF_RECORD_MMAP2 = 10, 1731 1732 /** 1733 * Records that new data landed in the AUX buffer part. 1734 * --- 1735 * struct { 1736 * struct perf_event_header header; 1737 * 1738 * u64 aux_offset; 1739 * u64 aux_size; 1740 * u64 flags; 1741 * struct sample_id sample_id; 1742 * }; 1743 * --- 1744 */ 1745 PERF_RECORD_AUX = 11, 1746 1747 /** 1748 * --- 1749 * Indicates that instruction trace has started 1750 * 1751 * struct { 1752 * struct perf_event_header header; 1753 * u32 pid; 1754 * u32 tid; 1755 * }; 1756 * --- 1757 */ 1758 PERF_RECORD_ITRACE_START = 12, 1759 1760 /** 1761 * Records the dropped/lost sample number. 1762 * --- 1763 * struct { 1764 * struct perf_event_header header; 1765 * 1766 * u64 lost; 1767 * struct sample_id sample_id; 1768 * }; 1769 * --- 1770 */ 1771 PERF_RECORD_LOST_SAMPLES = 13, 1772 1773 /** 1774 * 1775 * Records a context switch in or out (flagged by 1776 * PERF_RECORD_MISC_SWITCH_OUT). See also 1777 * PERF_RECORD_SWITCH_CPU_WIDE. 1778 * --- 1779 * struct { 1780 * struct perf_event_header header; 1781 * struct sample_id sample_id; 1782 * }; 1783 * --- 1784 */ 1785 PERF_RECORD_SWITCH = 14, 1786 1787 /** 1788 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and 1789 * next_prev_tid that are the next (switching out) or previous 1790 * (switching in) pid/tid. 
1791 * --- 1792 * struct { 1793 * struct perf_event_header header; 1794 * u32 next_prev_pid; 1795 * u32 next_prev_tid; 1796 * struct sample_id sample_id; 1797 * }; 1798 * --- 1799 */ 1800 PERF_RECORD_SWITCH_CPU_WIDE = 15, 1801 1802 /** 1803 * --- 1804 * struct { 1805 * struct perf_event_header header; 1806 * u32 pid; 1807 * u32 tid; 1808 * u64 nr_namespaces; 1809 * { u64 dev, inode; } [nr_namespaces]; 1810 * struct sample_id sample_id; 1811 * }; 1812 * --- 1813 */ 1814 PERF_RECORD_NAMESPACES = 16, 1815 1816 PERF_RECORD_MAX = 17 /* non-ABI */ 1817 } 1818 /// 1819 enum PERF_MAX_STACK_DEPTH = 127; 1820 /// 1821 enum PERF_MAX_CONTEXTS_PER_STACK = 8; 1822 /// 1823 enum perf_callchain_context 1824 { 1825 /// 1826 PERF_CONTEXT_HV = cast(ulong)-32, 1827 /// 1828 PERF_CONTEXT_KERNEL = cast(ulong)-128, 1829 /// 1830 PERF_CONTEXT_USER = cast(ulong)-512, 1831 /// 1832 PERF_CONTEXT_GUEST = cast(ulong)-2048, 1833 /// 1834 PERF_CONTEXT_GUEST_KERNEL = cast(ulong)-2176, 1835 /// 1836 PERF_CONTEXT_GUEST_USER = cast(ulong)-2560, 1837 /// 1838 PERF_CONTEXT_MAX = cast(ulong)-4095 1839 } 1840 1841 /** 1842 * PERF_RECORD_AUX::flags bits 1843 */ 1844 enum PERF_AUX_FLAG_TRUNCATED = 0x01; /** record was truncated to fit */ 1845 enum PERF_AUX_FLAG_OVERWRITE = 0x02; /** snapshot from overwrite mode */ 1846 enum PERF_AUX_FLAG_PARTIAL = 0x04; /** record contains gaps */ 1847 enum PERF_AUX_FLAG_COLLISION = 0x08; /** sample collided with another */ 1848 /// 1849 enum PERF_FLAG_FD_NO_GROUP = 1UL << 0; 1850 /// 1851 enum PERF_FLAG_FD_OUTPUT = 1UL << 1; 1852 enum PERF_FLAG_PID_CGROUP = 1UL << 2; /** pid=cgroup id, per-cpu mode only */ 1853 enum PERF_FLAG_FD_CLOEXEC = 1UL << 3; /** O_CLOEXEC */ 1854 ///perf_mem_data_src is endian specific. 1855 version (LittleEndian) 1856 { 1857 /// 1858 union perf_mem_data_src 1859 { 1860 /// 1861 ulong val; 1862 1863 struct 1864 { 1865 /* mixin(bitfields!(ulong, "mem_op", 5, ulong, "mem_lvl", 14, ulong, 1866 "mem_snoop", 5, ulong, "mem_lock", 2, ulong, "mem_dtlb", 7, ulong, 1867 "mem_lvl_num", 4, ulong, "mem_remote", 1, ulong, 1868 "mem_snoopx", 2, ulong, "mem_rsvd", 24)); */ 1869 1870 private ulong perf_mem_data_src_bitmanip; 1871 /// 1872 @property ulong mem_op() @safe pure nothrow @nogc const 1873 { 1874 auto result = (perf_mem_data_src_bitmanip & 31U) >> 0U; 1875 return cast(ulong) result; 1876 } 1877 /// 1878 @property void mem_op(ulong v) @safe pure nothrow @nogc 1879 { 1880 assert(v >= mem_op_min, 1881 "Value is smaller than the minimum value of bitfield 'mem_op'"); 1882 assert(v <= mem_op_max, 1883 "Value is greater than the maximum value of bitfield 'mem_op'"); 1884 perf_mem_data_src_bitmanip = cast( 1885 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1886 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 31U)) | ( 1887 (cast(typeof(perf_mem_data_src_bitmanip)) v << 0U) & 31U)); 1888 } 1889 1890 enum ulong mem_op_min = cast(ulong) 0U; 1891 enum ulong mem_op_max = cast(ulong) 31U; 1892 /// 1893 @property ulong mem_lvl() @safe pure nothrow @nogc const 1894 { 1895 auto result = (perf_mem_data_src_bitmanip & 524256U) >> 5U; 1896 return cast(ulong) result; 1897 } 1898 /// 1899 @property void mem_lvl(ulong v) @safe pure nothrow @nogc 1900 { 1901 assert(v >= mem_lvl_min, 1902 "Value is smaller than the minimum value of bitfield 'mem_lvl'"); 1903 assert(v <= mem_lvl_max, 1904 "Value is greater than the maximum value of bitfield 'mem_lvl'"); 1905 perf_mem_data_src_bitmanip = cast( 1906 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1907 -1 -
cast(typeof(perf_mem_data_src_bitmanip)) 524256U)) | ( 1908 (cast(typeof(perf_mem_data_src_bitmanip)) v << 5U) & 524256U)); 1909 } 1910 1911 enum ulong mem_lvl_min = cast(ulong) 0U; 1912 enum ulong mem_lvl_max = cast(ulong) 16383U; 1913 /// 1914 @property ulong mem_snoop() @safe pure nothrow @nogc const 1915 { 1916 auto result = (perf_mem_data_src_bitmanip & 16252928U) >> 19U; 1917 return cast(ulong) result; 1918 } 1919 /// 1920 @property void mem_snoop(ulong v) @safe pure nothrow @nogc 1921 { 1922 assert(v >= mem_snoop_min, 1923 "Value is smaller than the minimum value of bitfield 'mem_snoop'"); 1924 assert(v <= mem_snoop_max, 1925 "Value is greater than the maximum value of bitfield 'mem_snoop'"); 1926 perf_mem_data_src_bitmanip = cast( 1927 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1928 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 16252928U)) | ( 1929 (cast(typeof(perf_mem_data_src_bitmanip)) v << 19U) & 16252928U)); 1930 } 1931 1932 enum ulong mem_snoop_min = cast(ulong) 0U; 1933 enum ulong mem_snoop_max = cast(ulong) 31U; 1934 /// 1935 @property ulong mem_lock() @safe pure nothrow @nogc const 1936 { 1937 auto result = (perf_mem_data_src_bitmanip & 50331648U) >> 24U; 1938 return cast(ulong) result; 1939 } 1940 /// 1941 @property void mem_lock(ulong v) @safe pure nothrow @nogc 1942 { 1943 assert(v >= mem_lock_min, 1944 "Value is smaller than the minimum value of bitfield 'mem_lock'"); 1945 assert(v <= mem_lock_max, 1946 "Value is greater than the maximum value of bitfield 'mem_lock'"); 1947 perf_mem_data_src_bitmanip = cast( 1948 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1949 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 50331648U)) | ( 1950 (cast(typeof(perf_mem_data_src_bitmanip)) v << 24U) & 50331648U)); 1951 } 1952 1953 enum ulong mem_lock_min = cast(ulong) 0U; 1954 enum ulong mem_lock_max = cast(ulong) 3U; 1955 /// 1956 @property ulong mem_dtlb() @safe pure nothrow @nogc const 1957 { 1958 auto result = (perf_mem_data_src_bitmanip & 8522825728UL) >> 26U; 1959 return cast(ulong) result; 1960 } 1961 /// 1962 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc 1963 { 1964 assert(v >= mem_dtlb_min, 1965 "Value is smaller than the minimum value of bitfield 'mem_dtlb'"); 1966 assert(v <= mem_dtlb_max, 1967 "Value is greater than the maximum value of bitfield 'mem_dtlb'"); 1968 perf_mem_data_src_bitmanip = cast( 1969 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1970 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 8522825728UL)) | ( 1971 (cast(typeof(perf_mem_data_src_bitmanip)) v << 26U) & 8522825728UL)); 1972 } 1973 1974 enum ulong mem_dtlb_min = cast(ulong) 0U; 1975 enum ulong mem_dtlb_max = cast(ulong) 127U; 1976 /// 1977 @property ulong mem_lvl_num() @safe pure nothrow @nogc const 1978 { 1979 auto result = (perf_mem_data_src_bitmanip & 128849018880UL) >> 33U; 1980 return cast(ulong) result; 1981 } 1982 /// 1983 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc 1984 { 1985 assert(v >= mem_lvl_num_min, 1986 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'"); 1987 assert(v <= mem_lvl_num_max, 1988 "Value is greater than the maximum value of bitfield 'mem_lvl_num'"); 1989 perf_mem_data_src_bitmanip = cast( 1990 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 1991 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 128849018880UL)) | ( 1992 (cast(typeof(perf_mem_data_src_bitmanip)) v << 33U) & 128849018880UL)); 1993 } 1994 1995 enum ulong mem_lvl_num_min = cast(ulong) 
0U; 1996 enum ulong mem_lvl_num_max = cast(ulong) 15U; 1997 /// 1998 @property ulong mem_remote() @safe pure nothrow @nogc const 1999 { 2000 auto result = (perf_mem_data_src_bitmanip & 137438953472UL) >> 37U; 2001 return cast(ulong) result; 2002 } 2003 /// 2004 @property void mem_remote(ulong v) @safe pure nothrow @nogc 2005 { 2006 assert(v >= mem_remote_min, 2007 "Value is smaller than the minimum value of bitfield 'mem_remote'"); 2008 assert(v <= mem_remote_max, 2009 "Value is greater than the maximum value of bitfield 'mem_remote'"); 2010 perf_mem_data_src_bitmanip = cast( 2011 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 2012 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 137438953472UL)) | ( 2013 (cast(typeof(perf_mem_data_src_bitmanip)) v << 37U) & 137438953472UL)); 2014 } 2015 2016 enum ulong mem_remote_min = cast(ulong) 0U; 2017 enum ulong mem_remote_max = cast(ulong) 1U; 2018 /// 2019 @property ulong mem_snoopx() @safe pure nothrow @nogc const 2020 { 2021 auto result = (perf_mem_data_src_bitmanip & 824633720832UL) >> 38U; 2022 return cast(ulong) result; 2023 } 2024 /// 2025 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc 2026 { 2027 assert(v >= mem_snoopx_min, 2028 "Value is smaller than the minimum value of bitfield 'mem_snoopx'"); 2029 assert(v <= mem_snoopx_max, 2030 "Value is greater than the maximum value of bitfield 'mem_snoopx'"); 2031 perf_mem_data_src_bitmanip = cast( 2032 typeof(perf_mem_data_src_bitmanip))((perf_mem_data_src_bitmanip & ( 2033 -1 - cast(typeof(perf_mem_data_src_bitmanip)) 824633720832UL)) | ( 2034 (cast(typeof(perf_mem_data_src_bitmanip)) v << 38U) & 824633720832UL)); 2035 } 2036 2037 enum ulong mem_snoopx_min = cast(ulong) 0U; 2038 enum ulong mem_snoopx_max = cast(ulong) 3U; 2039 /// 2040 @property ulong mem_rsvd() @safe pure nothrow @nogc const 2041 { 2042 auto result = (perf_mem_data_src_bitmanip & 18446742974197923840UL) >> 40U; 2043 return cast(ulong) result; 2044 } 2045 /// 2046 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc 2047 { 2048 assert(v >= mem_rsvd_min, 2049 "Value is smaller than the minimum value of bitfield 'mem_rsvd'"); 2050 assert(v <= mem_rsvd_max, 2051 "Value is greater than the maximum value of bitfield 'mem_rsvd'"); 2052 perf_mem_data_src_bitmanip = cast( 2053 typeof(perf_mem_data_src_bitmanip))( 2054 (perf_mem_data_src_bitmanip & (-1 - cast( 2055 typeof(perf_mem_data_src_bitmanip)) 18446742974197923840UL)) | ( 2056 (cast(typeof(perf_mem_data_src_bitmanip)) v << 40U) & 18446742974197923840UL)); 2057 } 2058 2059 enum ulong mem_rsvd_min = cast(ulong) 0U; 2060 enum ulong mem_rsvd_max = cast(ulong) 16777215U; 2061 2062 } 2063 } 2064 } 2065 else 2066 { 2067 /// 2068 union perf_mem_data_src 2069 { 2070 /// 2071 ulong val; 2072 2073 struct 2074 { 2075 /* mixin(bitfields!(ulong, "mem_rsvd", 24, ulong, "mem_snoopx", 2, ulong, 2076 "mem_remote", 1, ulong, "mem_lvl_num", 4, ulong, "mem_dtlb", 7, ulong, 2077 "mem_lock", 2, ulong, "mem_snoop", 5, ulong, "mem_lvl", 2078 14, ulong, "mem_op", 5)); */ 2079 private ulong perf_mem_data_src; 2080 /// 2081 @property ulong mem_rsvd() @safe pure nothrow @nogc const 2082 { 2083 auto result = (perf_mem_data_src & 16777215U) >> 0U; 2084 return cast(ulong) result; 2085 } 2086 /// 2087 @property void mem_rsvd(ulong v) @safe pure nothrow @nogc 2088 { 2089 assert(v >= mem_rsvd_min, 2090 "Value is smaller than the minimum value of bitfield 'mem_rsvd'"); 2091 assert(v <= mem_rsvd_max, 2092 "Value is greater than the maximum value of bitfield 'mem_rsvd'"); 2093 
perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2094 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 16777215U)) | ( 2095 (cast(typeof(perf_mem_data_src)) v << 0U) & 16777215U)); 2096 } 2097 2098 enum ulong mem_rsvd_min = cast(ulong) 0U; 2099 enum ulong mem_rsvd_max = cast(ulong) 16777215U; 2100 /// 2101 @property ulong mem_snoopx() @safe pure nothrow @nogc const 2102 { 2103 auto result = (perf_mem_data_src & 50331648U) >> 24U; 2104 return cast(ulong) result; 2105 } 2106 /// 2107 @property void mem_snoopx(ulong v) @safe pure nothrow @nogc 2108 { 2109 assert(v >= mem_snoopx_min, 2110 "Value is smaller than the minimum value of bitfield 'mem_snoopx'"); 2111 assert(v <= mem_snoopx_max, 2112 "Value is greater than the maximum value of bitfield 'mem_snoopx'"); 2113 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2114 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 50331648U)) | ( 2115 (cast(typeof(perf_mem_data_src)) v << 24U) & 50331648U)); 2116 } 2117 2118 enum ulong mem_snoopx_min = cast(ulong) 0U; 2119 enum ulong mem_snoopx_max = cast(ulong) 3U; 2120 /// 2121 @property ulong mem_remote() @safe pure nothrow @nogc const 2122 { 2123 auto result = (perf_mem_data_src & 67108864U) >> 26U; 2124 return cast(ulong) result; 2125 } 2126 /// 2127 @property void mem_remote(ulong v) @safe pure nothrow @nogc 2128 { 2129 assert(v >= mem_remote_min, 2130 "Value is smaller than the minimum value of bitfield 'mem_remote'"); 2131 assert(v <= mem_remote_max, 2132 "Value is greater than the maximum value of bitfield 'mem_remote'"); 2133 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2134 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 67108864U)) | ( 2135 (cast(typeof(perf_mem_data_src)) v << 26U) & 67108864U)); 2136 } 2137 2138 enum ulong mem_remote_min = cast(ulong) 0U; 2139 enum ulong mem_remote_max = cast(ulong) 1U; 2140 /// 2141 @property ulong mem_lvl_num() @safe pure nothrow @nogc const 2142 { 2143 auto result = (perf_mem_data_src & 2013265920U) >> 27U; 2144 return cast(ulong) result; 2145 } 2146 /// 2147 @property void mem_lvl_num(ulong v) @safe pure nothrow @nogc 2148 { 2149 assert(v >= mem_lvl_num_min, 2150 "Value is smaller than the minimum value of bitfield 'mem_lvl_num'"); 2151 assert(v <= mem_lvl_num_max, 2152 "Value is greater than the maximum value of bitfield 'mem_lvl_num'"); 2153 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2154 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 2013265920U)) | ( 2155 (cast(typeof(perf_mem_data_src)) v << 27U) & 2013265920U)); 2156 } 2157 2158 enum ulong mem_lvl_num_min = cast(ulong) 0U; 2159 enum ulong mem_lvl_num_max = cast(ulong) 15U; 2160 /// 2161 @property ulong mem_dtlb() @safe pure nothrow @nogc const 2162 { 2163 auto result = (perf_mem_data_src & 272730423296UL) >> 31U; 2164 return cast(ulong) result; 2165 } 2166 /// 2167 @property void mem_dtlb(ulong v) @safe pure nothrow @nogc 2168 { 2169 assert(v >= mem_dtlb_min, 2170 "Value is smaller than the minimum value of bitfield 'mem_dtlb'"); 2171 assert(v <= mem_dtlb_max, 2172 "Value is greater than the maximum value of bitfield 'mem_dtlb'"); 2173 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2174 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 272730423296UL)) | ( 2175 (cast(typeof(perf_mem_data_src)) v << 31U) & 272730423296UL)); 2176 } 2177 2178 enum ulong mem_dtlb_min = cast(ulong) 0U; 2179 enum ulong mem_dtlb_max = cast(ulong) 127U; 2180 /// 2181 @property ulong mem_lock() @safe pure nothrow @nogc const 2182 { 2183 auto 
result = (perf_mem_data_src & 824633720832UL) >> 38U; 2184 return cast(ulong) result; 2185 } 2186 /// 2187 @property void mem_lock(ulong v) @safe pure nothrow @nogc 2188 { 2189 assert(v >= mem_lock_min, 2190 "Value is smaller than the minimum value of bitfield 'mem_lock'"); 2191 assert(v <= mem_lock_max, 2192 "Value is greater than the maximum value of bitfield 'mem_lock'"); 2193 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2194 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 824633720832UL)) | ( 2195 (cast(typeof(perf_mem_data_src)) v << 38U) & 824633720832UL)); 2196 } 2197 2198 enum ulong mem_lock_min = cast(ulong) 0U; 2199 enum ulong mem_lock_max = cast(ulong) 3U; 2200 /// 2201 @property ulong mem_snoop() @safe pure nothrow @nogc const 2202 { 2203 auto result = (perf_mem_data_src & 34084860461056UL) >> 40U; 2204 return cast(ulong) result; 2205 } 2206 /// 2207 @property void mem_snoop(ulong v) @safe pure nothrow @nogc 2208 { 2209 assert(v >= mem_snoop_min, 2210 "Value is smaller than the minimum value of bitfield 'mem_snoop'"); 2211 assert(v <= mem_snoop_max, 2212 "Value is greater than the maximum value of bitfield 'mem_snoop'"); 2213 perf_mem_data_src = cast(typeof(perf_mem_data_src))( 2214 (perf_mem_data_src & (-1 - cast(typeof(perf_mem_data_src)) 34084860461056UL)) | ( 2215 (cast(typeof(perf_mem_data_src)) v << 40U) & 34084860461056UL)); 2216 } 2217 2218 enum ulong mem_snoop_min = cast(ulong) 0U; 2219 enum ulong mem_snoop_max = cast(ulong) 31U; 2220 /// 2221 @property ulong mem_lvl() @safe pure nothrow @nogc const 2222 { 2223 auto result = (perf_mem_data_src & 576425567931334656UL) >> 45U; 2224 return cast(ulong) result; 2225 } 2226 /// 2227 @property void mem_lvl(ulong v) @safe pure nothrow @nogc 2228 { 2229 assert(v >= mem_lvl_min, 2230 "Value is smaller than the minimum value of bitfield 'mem_lvl'"); 2231 assert(v <= mem_lvl_max, 2232 "Value is greater than the maximum value of bitfield 'mem_lvl'"); 2233 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & ( 2234 -1 - cast(typeof(perf_mem_data_src)) 576425567931334656UL)) | ( 2235 (cast(typeof(perf_mem_data_src)) v << 45U) & 576425567931334656UL)); 2236 } 2237 2238 enum ulong mem_lvl_min = cast(ulong) 0U; 2239 enum ulong mem_lvl_max = cast(ulong) 16383U; 2240 /// 2241 @property ulong mem_op() @safe pure nothrow @nogc const 2242 { 2243 auto result = (perf_mem_data_src & 17870283321406128128UL) >> 59U; 2244 return cast(ulong) result; 2245 } 2246 /// 2247 @property void mem_op(ulong v) @safe pure nothrow @nogc 2248 { 2249 assert(v >= mem_op_min, 2250 "Value is smaller than the minimum value of bitfield 'mem_op'"); 2251 assert(v <= mem_op_max, 2252 "Value is greater than the maximum value of bitfield 'mem_op'"); 2253 perf_mem_data_src = cast(typeof(perf_mem_data_src))((perf_mem_data_src & ( 2254 -1 - cast(typeof(perf_mem_data_src)) 17870283321406128128UL)) | ( 2255 (cast(typeof(perf_mem_data_src)) v << 59U) & 17870283321406128128UL)); 2256 } 2257 2258 enum ulong mem_op_min = cast(ulong) 0U; 2259 enum ulong mem_op_max = cast(ulong) 31U; 2260 } 2261 } 2262 } 2263 2264 /* snoop mode, ext */ 2265 /* remote */ 2266 /* memory hierarchy level number */ 2267 /* tlb access */ 2268 /* lock instr */ 2269 /* snoop mode */ 2270 /* memory hierarchy level */ 2271 /* type of opcode */ 2272 2273 /** type of opcode (load/store/prefetch,code) */ 2274 enum PERF_MEM_OP_NA = 0x01; /** not available */ 2275 enum PERF_MEM_OP_LOAD = 0x02; /** load instruction */ 2276 enum PERF_MEM_OP_STORE = 0x04; /** store instruction 
*/ 2277 enum PERF_MEM_OP_PFETCH = 0x08; /** prefetch */ 2278 enum PERF_MEM_OP_EXEC = 0x10; /** code (execution) */ 2279 enum PERF_MEM_OP_SHIFT = 0; 2280 2281 /* memory hierarchy (memory level, hit or miss) */ 2282 enum PERF_MEM_LVL_NA = 0x01; /** not available */ 2283 enum PERF_MEM_LVL_HIT = 0x02; /** hit level */ 2284 enum PERF_MEM_LVL_MISS = 0x04; /** miss level */ 2285 enum PERF_MEM_LVL_L1 = 0x08; /** L1 */ 2286 enum PERF_MEM_LVL_LFB = 0x10; /** Line Fill Buffer */ 2287 enum PERF_MEM_LVL_L2 = 0x20; /** L2 */ 2288 enum PERF_MEM_LVL_L3 = 0x40; /** L3 */ 2289 enum PERF_MEM_LVL_LOC_RAM = 0x80; /** Local DRAM */ 2290 enum PERF_MEM_LVL_REM_RAM1 = 0x100; /** Remote DRAM (1 hop) */ 2291 enum PERF_MEM_LVL_REM_RAM2 = 0x200; /** Remote DRAM (2 hops) */ 2292 enum PERF_MEM_LVL_REM_CCE1 = 0x400; /** Remote Cache (1 hop) */ 2293 enum PERF_MEM_LVL_REM_CCE2 = 0x800; /** Remote Cache (2 hops) */ 2294 enum PERF_MEM_LVL_IO = 0x1000; /** I/O memory */ 2295 enum PERF_MEM_LVL_UNC = 0x2000; /** Uncached memory */ 2296 /// 2297 enum PERF_MEM_LVL_SHIFT = 5; 2298 2299 enum PERF_MEM_REMOTE_REMOTE = 0x01; /** Remote */ 2300 /// 2301 enum PERF_MEM_REMOTE_SHIFT = 37; 2302 2303 enum PERF_MEM_LVLNUM_L1 = 0x01; /** L1 */ 2304 enum PERF_MEM_LVLNUM_L2 = 0x02; /** L2 */ 2305 enum PERF_MEM_LVLNUM_L3 = 0x03; /** L3 */ 2306 enum PERF_MEM_LVLNUM_L4 = 0x04; /** L4 */ 2307 /* 5-0xa available */ 2308 enum PERF_MEM_LVLNUM_ANY_CACHE = 0x0b; /** Any cache */ 2309 enum PERF_MEM_LVLNUM_LFB = 0x0c; /** LFB */ 2310 enum PERF_MEM_LVLNUM_RAM = 0x0d; /** RAM */ 2311 enum PERF_MEM_LVLNUM_PMEM = 0x0e; /** PMEM */ 2312 enum PERF_MEM_LVLNUM_NA = 0x0f; /** N/A */ 2313 /// 2314 enum PERF_MEM_LVLNUM_SHIFT = 33; 2315 2316 /* snoop mode */ 2317 enum PERF_MEM_SNOOP_NA = 0x01; /** not available */ 2318 enum PERF_MEM_SNOOP_NONE = 0x02; /** no snoop */ 2319 enum PERF_MEM_SNOOP_HIT = 0x04; /** snoop hit */ 2320 enum PERF_MEM_SNOOP_MISS = 0x08; /** snoop miss */ 2321 enum PERF_MEM_SNOOP_HITM = 0x10; /** snoop hit modified */ 2322 /// 2323 enum PERF_MEM_SNOOP_SHIFT = 19; 2324 2325 enum PERF_MEM_SNOOPX_FWD = 0x01; /** forward */ 2326 /** 1 free */ 2327 enum PERF_MEM_SNOOPX_SHIFT = 37; 2328 2329 /** locked instruction */ 2330 enum PERF_MEM_LOCK_NA = 0x01; /** not available */ 2331 enum PERF_MEM_LOCK_LOCKED = 0x02; /** locked transaction */ 2332 /// 2333 enum PERF_MEM_LOCK_SHIFT = 24; 2334 2335 /* TLB access */ 2336 enum PERF_MEM_TLB_NA = 0x01; /** not available */ 2337 enum PERF_MEM_TLB_HIT = 0x02; /** hit level */ 2338 enum PERF_MEM_TLB_MISS = 0x04; /** miss level */ 2339 enum PERF_MEM_TLB_L1 = 0x08; /** L1 */ 2340 enum PERF_MEM_TLB_L2 = 0x10; /** L2 */ 2341 enum PERF_MEM_TLB_WK = 0x20; /** Hardware Walker*/ 2342 enum PERF_MEM_TLB_OS = 0x40; /** OS fault handler */ 2343 /// 2344 enum PERF_MEM_TLB_SHIFT = 26; 2345 2346 /** 2347 * single taken branch record layout: 2348 * 2349 * from: source instruction (may not always be a branch insn) 2350 * to: branch target 2351 * mispred: branch target was mispredicted 2352 * predicted: branch target was predicted 2353 * 2354 * support for mispred, predicted is optional. In case it 2355 * is not supported mispred = predicted = 0. 
2356 * 2357 * in_tx: running in a hardware transaction 2358 * abort: aborting a hardware transaction 2359 * cycles: cycles from last branch (or 0 if not supported) 2360 * type: branch type 2361 */ 2362 struct perf_branch_entry 2363 { 2364 /// 2365 ulong from; 2366 /// 2367 ulong to; 2368 2369 /* mixin(bitfields!(ulong, "mispred", 1, ulong, "predicted", 1, ulong, 2370 "in_tx", 1, ulong, "abort", 1, ulong, "cycles", 16, ulong, "type", 2371 4, ulong, "reserved", 40)); */ 2372 private ulong perf_branch_entry_bitmanip; 2373 /// 2374 @property ulong mispred() @safe pure nothrow @nogc const 2375 { 2376 auto result = (perf_branch_entry_bitmanip & 1U) >> 0U; 2377 return cast(ulong) result; 2378 } 2379 /// 2380 @property void mispred(ulong v) @safe pure nothrow @nogc 2381 { 2382 assert(v >= mispred_min, 2383 "Value is smaller than the minimum value of bitfield 'mispred'"); 2384 assert(v <= mispred_max, 2385 "Value is greater than the maximum value of bitfield 'mispred'"); 2386 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2387 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1U)) | ( 2388 (cast(typeof(perf_branch_entry_bitmanip)) v << 0U) & 1U)); 2389 } 2390 2391 enum ulong mispred_min = cast(ulong) 0U; 2392 enum ulong mispred_max = cast(ulong) 1U; 2393 /// 2394 @property ulong predicted() @safe pure nothrow @nogc const 2395 { 2396 auto result = (perf_branch_entry_bitmanip & 2U) >> 1U; 2397 return cast(ulong) result; 2398 } 2399 /// 2400 @property void predicted(ulong v) @safe pure nothrow @nogc 2401 { 2402 assert(v >= predicted_min, 2403 "Value is smaller than the minimum value of bitfield 'predicted'"); 2404 assert(v <= predicted_max, 2405 "Value is greater than the maximum value of bitfield 'predicted'"); 2406 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2407 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 2U)) | ( 2408 (cast(typeof(perf_branch_entry_bitmanip)) v << 1U) & 2U)); 2409 } 2410 2411 enum ulong predicted_min = cast(ulong) 0U; 2412 enum ulong predicted_max = cast(ulong) 1U; 2413 /// 2414 @property ulong in_tx() @safe pure nothrow @nogc const 2415 { 2416 auto result = (perf_branch_entry_bitmanip & 4U) >> 2U; 2417 return cast(ulong) result; 2418 } 2419 /// 2420 @property void in_tx(ulong v) @safe pure nothrow @nogc 2421 { 2422 assert(v >= in_tx_min, 2423 "Value is smaller than the minimum value of bitfield 'in_tx'"); 2424 assert(v <= in_tx_max, 2425 "Value is greater than the maximum value of bitfield 'in_tx'"); 2426 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2427 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 4U)) | ( 2428 (cast(typeof(perf_branch_entry_bitmanip)) v << 2U) & 4U)); 2429 } 2430 2431 enum ulong in_tx_min = cast(ulong) 0U; 2432 enum ulong in_tx_max = cast(ulong) 1U; 2433 /// 2434 @property ulong abort() @safe pure nothrow @nogc const 2435 { 2436 auto result = (perf_branch_entry_bitmanip & 8U) >> 3U; 2437 return cast(ulong) result; 2438 } 2439 /// 2440 @property void abort(ulong v) @safe pure nothrow @nogc 2441 { 2442 assert(v >= abort_min, 2443 "Value is smaller than the minimum value of bitfield 'abort'"); 2444 assert(v <= abort_max, 2445 "Value is greater than the maximum value of bitfield 'abort'"); 2446 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2447 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 8U)) | ( 2448 (cast(typeof(perf_branch_entry_bitmanip)) 
v << 3U) & 8U)); 2449 } 2450 2451 enum ulong abort_min = cast(ulong) 0U; 2452 enum ulong abort_max = cast(ulong) 1U; 2453 /// 2454 @property ulong cycles() @safe pure nothrow @nogc const 2455 { 2456 auto result = (perf_branch_entry_bitmanip & 1048560U) >> 4U; 2457 return cast(ulong) result; 2458 } 2459 /// 2460 @property void cycles(ulong v) @safe pure nothrow @nogc 2461 { 2462 assert(v >= cycles_min, 2463 "Value is smaller than the minimum value of bitfield 'cycles'"); 2464 assert(v <= cycles_max, 2465 "Value is greater than the maximum value of bitfield 'cycles'"); 2466 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2467 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 1048560U)) | ( 2468 (cast(typeof(perf_branch_entry_bitmanip)) v << 4U) & 1048560U)); 2469 } 2470 2471 enum ulong cycles_min = cast(ulong) 0U; 2472 enum ulong cycles_max = cast(ulong) 65535U; 2473 /// 2474 @property ulong type() @safe pure nothrow @nogc const 2475 { 2476 auto result = (perf_branch_entry_bitmanip & 15728640U) >> 20U; 2477 return cast(ulong) result; 2478 } 2479 /// 2480 @property void type(ulong v) @safe pure nothrow @nogc 2481 { 2482 assert(v >= type_min, "Value is smaller than the minimum value of bitfield 'type'"); 2483 assert(v <= type_max, "Value is greater than the maximum value of bitfield 'type'"); 2484 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2485 (perf_branch_entry_bitmanip & (-1 - cast(typeof(perf_branch_entry_bitmanip)) 15728640U)) | ( 2486 (cast(typeof(perf_branch_entry_bitmanip)) v << 20U) & 15728640U)); 2487 } 2488 2489 enum ulong type_min = cast(ulong) 0U; 2490 enum ulong type_max = cast(ulong) 15U; 2491 /// 2492 @property ulong reserved() @safe pure nothrow @nogc const 2493 { 2494 auto result = (perf_branch_entry_bitmanip & 18446744073692774400UL) >> 24U; 2495 return cast(ulong) result; 2496 } 2497 /// 2498 @property void reserved(ulong v) @safe pure nothrow @nogc 2499 { 2500 assert(v >= reserved_min, 2501 "Value is smaller than the minimum value of bitfield 'reserved'"); 2502 assert(v <= reserved_max, 2503 "Value is greater than the maximum value of bitfield 'reserved'"); 2504 perf_branch_entry_bitmanip = cast(typeof(perf_branch_entry_bitmanip))( 2505 (perf_branch_entry_bitmanip & (-1 - cast( 2506 typeof(perf_branch_entry_bitmanip)) 18446744073692774400UL)) | ( 2507 (cast(typeof(perf_branch_entry_bitmanip)) v << 24U) & 18446744073692774400UL)); 2508 } 2509 2510 enum ulong reserved_min = cast(ulong) 0U; 2511 enum ulong reserved_max = cast(ulong) 1099511627775UL; 2512 }
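
/**
 * Usage sketch (not part of the kernel ABI bindings above): a minimal example
 * of how the generated bitfield accessors combine with the PERF_MEM_* flag
 * constants and the perf_branch_entry definition in this module. The helper
 * names and parameters below are hypothetical and purely illustrative.
 * ---
 * // Hypothetical helper: does a data_src value (as delivered when
 * // PERF_SAMPLE_DATA_SRC is requested) describe a load that missed and was
 * // satisfied from local DRAM?
 * bool isLocalDramLoadMiss(ulong rawDataSrc) @nogc nothrow
 * {
 *     perf_mem_data_src src;
 *     src.val = rawDataSrc;
 *     // The mem_op/mem_lvl accessors already undo PERF_MEM_OP_SHIFT and
 *     // PERF_MEM_LVL_SHIFT, so the fields can be tested directly against
 *     // the flag constants.
 *     return (src.mem_op & PERF_MEM_OP_LOAD) != 0
 *         && (src.mem_lvl & PERF_MEM_LVL_MISS) != 0
 *         && (src.mem_lvl & PERF_MEM_LVL_LOC_RAM) != 0;
 * }
 *
 * // Hypothetical helper: count mispredicted branches in a decoded
 * // PERF_SAMPLE_BRANCH_STACK record.
 * size_t countMispredicted(const(perf_branch_entry)[] lbr) @nogc nothrow
 * {
 *     size_t n;
 *     foreach (ref entry; lbr)
 *         if (entry.mispred)
 *             ++n;
 *     return n;
 * }
 * ---
 */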