#include "builtin.h"
#include "perf.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/intlist.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include "util/tool.h"
#include "util/stat.h"
#include <sys/prctl.h>
#ifdef HAVE_TIMERFD_SUPPORT
#include <sys/timerfd.h>
#endif
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
#if defined(__i386__) || defined(__x86_64__)
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>
#endif
struct event_key {
#define INVALID_KEY (~0ULL)
u64 key;
int info;
};
struct kvm_event_stats {
u64 time;
struct stats stats;
};
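/*
 * One kvm_event exists per distinct key (exit reason, mmio address,
 * io port): hash_entry chains it into the events cache, rb links it
 * into the sorted result tree, and stats are kept both in total and
 * per vcpu (the vcpu array grows on demand, see kvm_event_expand()).
 */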
struct kvm_event {
struct list_head hash_entry;
struct rb_node rb;
struct event_key key;
struct kvm_event_stats total;
#define DEFAULT_VCPU_NUM 8
int max_vcpu;
struct kvm_event_stats *vcpu;
};
typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
struct kvm_event_key {
const char *name;
key_cmp_fun key;
};
struct kvm_events_ops {
bool (*is_begin_event)(struct perf_evsel *evsel,
struct perf_sample *sample,
struct event_key *key);
bool (*is_end_event)(struct perf_evsel *evsel,
struct perf_sample *sample, struct event_key *key);
void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
char decode[20]);
const char *name;
};

struct exit_reasons_table {
unsigned long exit_code;
const char *reason;
};
#define EVENTS_BITS 12
#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS)
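/*
 * Top-level state for the kvm stat subcommand: record/report options,
 * the open-hashed cache of events, running totals and the rb-tree of
 * sorted results.
 */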
struct perf_kvm_stat {
struct perf_tool tool;
struct perf_record_opts opts;
struct perf_evlist *evlist;
struct perf_session *session;
const char *file_name;
const char *report_event;
const char *sort_key;
int trace_vcpu;
struct exit_reasons_table *exit_reasons;
int exit_reasons_size;
const char *exit_reasons_isa;
struct kvm_events_ops *events_ops;
key_cmp_fun compare;
struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
u64 total_time;
u64 total_count;
u64 lost_events;
u64 duration;
const char *pid_str;
struct intlist *pid_list;
struct rb_root result;
int timerfd;
unsigned int display_time;
bool live;
};
static void exit_event_get_key(struct perf_evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "exit_reason");
}
static bool kvm_exit_event(struct perf_evsel *evsel)
{
return !strcmp(evsel->name, "kvm:kvm_exit");
}
static bool exit_event_begin(struct perf_evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
if (kvm_exit_event(evsel)) {
exit_event_get_key(evsel, sample, key);
return true;
}
return false;
}
static bool kvm_entry_event(struct perf_evsel *evsel)
{
return !strcmp(evsel->name, "kvm:kvm_entry");
}
static bool exit_event_end(struct perf_evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
return kvm_entry_event(evsel);
}
static struct exit_reasons_table vmx_exit_reasons[] = {
VMX_EXIT_REASONS
};

static struct exit_reasons_table svm_exit_reasons[] = {
SVM_EXIT_REASONS
};
static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
{
int i = kvm->exit_reasons_size;
struct exit_reasons_table *tbl = kvm->exit_reasons;
while (i--) {
if (tbl->exit_code == exit_code)
return tbl->reason;
tbl++;
}
pr_err("unknown kvm exit code:%lld on %s\n",
(unsigned long long)exit_code, kvm->exit_reasons_isa);
return "UNKNOWN";
}
static void exit_event_decode_key(struct perf_kvm_stat *kvm,
struct event_key *key,
char decode[20])
{
const char *exit_reason = get_exit_reason(kvm, key->key);
scnprintf(decode, 20, "%s", exit_reason);
}
static struct kvm_events_ops exit_events = {
.is_begin_event = exit_event_begin,
.is_end_event = exit_event_end,
.decode_key = exit_event_decode_key,
.name = "VM-EXIT"
};
/*
* For the mmio events, we treat:
* the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
* the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
*/
static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "gpa");
key->info = perf_evsel__intval(evsel, sample, "type");
}
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2
static bool mmio_event_begin(struct perf_evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
/* MMIO read begin event in kernel. */
if (kvm_exit_event(evsel))
return true;
/* MMIO write begin event in kernel. */
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
mmio_event_get_key(evsel, sample, key);
return true;
}
return false;
}
static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
struct event_key *key)
{
/* MMIO write end event in kernel. */
if (kvm_entry_event(evsel))
return true;
/* MMIO read end event in kernel.*/
if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
mmio_event_get_key(evsel, sample, key);
return true;
}
return false;
}
static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char decode[20])
{
scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}
static struct kvm_events_ops mmio_events = {
.is_begin_event = mmio_event_begin,
.is_end_event = mmio_event_end,
.decode_key = mmio_event_decode_key,
.name = "MMIO Access"
};
/* The time of emulation pio access is from kvm_pio to kvm_entry. */
static void ioport_event_get_key(struct perf_evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->key = perf_evsel__intval(evsel, sample, "port");
key->info = perf_evsel__intval(evsel, sample, "rw");
}
static bool ioport_event_begin(struct perf_evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
if (!strcmp(evsel->name, "kvm:kvm_pio")) {
ioport_event_get_key(evsel, sample, key);
return true;
}
return false;
}
static bool ioport_event_end(struct perf_evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
return kvm_entry_event(evsel);
}
static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char decode[20])
{
scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
key->info ? "POUT" : "PIN");
}
static struct kvm_events_ops ioport_events = {
.is_begin_event = ioport_event_begin,
.is_end_event = ioport_event_end,
.decode_key = ioport_event_decode_key,
.name = "IO Port Access"
};
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
bool ret = true;

if (!strcmp(kvm->report_event, "vmexit"))
kvm->events_ops = &exit_events;
else if (!strcmp(kvm->report_event, "mmio"))
kvm->events_ops = &mmio_events;
else if (!strcmp(kvm->report_event, "ioport"))
kvm->events_ops = &ioport_events;
pr_err("Unknown report event:%s\n", kvm->report_event);
ret = false;
}
return ret;
}
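/*
 * Per-thread pairing state: the begin event stashes the event pointer
 * and start timestamp here (via thread->priv, see per_vcpu_record()),
 * and the matching end event picks them up to compute the duration.
 */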
struct vcpu_event_record {
int vcpu_id;
u64 start_time;
struct kvm_event *last_event;
};
static void init_kvm_event_record(struct perf_kvm_stat *kvm)
{
unsigned int i;

for (i = 0; i < EVENTS_CACHE_SIZE; i++)
INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
}
#ifdef HAVE_TIMERFD_SUPPORT
static void clear_events_cache_stats(struct list_head *kvm_events_cache)
{
struct list_head *head;
struct kvm_event *event;
unsigned int i;
int j;
for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
head = &kvm_events_cache[i];
list_for_each_entry(event, head, hash_entry) {
/* reset stats for event */
event->total.time = 0;
init_stats(&event->total.stats);
for (j = 0; j < event->max_vcpu; ++j) {
event->vcpu[j].time = 0;
init_stats(&event->vcpu[j].stats);
}
}
}
}
#endif
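/*
 * EVENTS_CACHE_SIZE is a power of two (1 << EVENTS_BITS), so masking
 * with (size - 1) is equivalent to key % EVENTS_CACHE_SIZE.
 */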
static int kvm_events_hash_fn(u64 key)
{
return key & (EVENTS_CACHE_SIZE - 1);
}
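/*
 * Grow the per-vcpu stats array in DEFAULT_VCPU_NUM-sized steps so
 * that a guest with a large vcpu_id does not trigger a realloc on
 * every event.
 */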
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
int old_max_vcpu = event->max_vcpu;
if (vcpu_id < event->max_vcpu)
return true;
while (event->max_vcpu <= vcpu_id)
event->max_vcpu += DEFAULT_VCPU_NUM;
event->vcpu = realloc(event->vcpu,
event->max_vcpu * sizeof(*event->vcpu));
if (!event->vcpu) {
pr_err("Not enough memory\n");
return false;
}
memset(event->vcpu + old_max_vcpu, 0,
(event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
return true;
}
static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
{
struct kvm_event *event;
event = zalloc(sizeof(*event));
if (!event) {
pr_err("Not enough memory\n");
return NULL;
}
event->key = *key;
return event;
}
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
struct event_key *key)
{
struct kvm_event *event;
struct list_head *head;
BUG_ON(key->key == INVALID_KEY);
head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
list_for_each_entry(event, head, hash_entry) {
if (event->key.key == key->key && event->key.info == key->info)
return event;
}

event = kvm_alloc_init_event(key);
if (!event)
return NULL;
list_add(&event->hash_entry, head);
return event;
}
static bool handle_begin_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key, u64 timestamp)
{
struct kvm_event *event = NULL;
if (key->key != INVALID_KEY)
event = find_create_kvm_event(kvm, key);
vcpu_record->last_event = event;
vcpu_record->start_time = timestamp;
return true;
}
static void
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
{
kvm_stats->time += time_diff;
update_stats(&kvm_stats->stats, time_diff);
}
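/*
 * Relative standard deviation: stddev expressed as a percentage of
 * the mean, printed in the "+-" column of the report.
 */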
static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
{
struct kvm_event_stats *kvm_stats = &event->total;
if (vcpu_id != -1)
kvm_stats = &event->vcpu[vcpu_id];
return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
avg_stats(&kvm_stats->stats));
}
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
u64 time_diff)
{
if (vcpu_id == -1) {
kvm_update_event_stats(&event->total, time_diff);
return true;
}
if (!kvm_event_expand(event, vcpu_id))
return false;
kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
return true;
}
static bool handle_end_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
struct perf_sample *sample)
{
struct kvm_event *event;
u64 time_begin, time_diff;
int vcpu;
if (kvm->trace_vcpu == -1)
vcpu = -1;
else
vcpu = vcpu_record->vcpu_id;
event = vcpu_record->last_event;
time_begin = vcpu_record->start_time;
/* The begin event is not caught. */
if (!time_begin)
return true;
/*
* In some case, the 'begin event' only records the start timestamp,
* the actual event is recognized in the 'end event' (e.g. mmio-event).
*/
/* Both begin and end events did not get the key. */
if (!event && key->key == INVALID_KEY)
return true;
if (!event)
event = find_create_kvm_event(kvm, key);
if (!event)
return false;
vcpu_record->last_event = NULL;
vcpu_record->start_time = 0;
/* seems to happen once in a while during live mode */
if (sample->time < time_begin) {
pr_debug("End time before begin time; skipping event.\n");
return true;
}
time_diff = sample->time - time_begin;
if (kvm->duration && time_diff > kvm->duration) {
char decode[32];
kvm->events_ops->decode_key(kvm, &event->key, decode);
if (strcmp(decode, "HLT")) {
pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
sample->time, sample->pid, vcpu_record->vcpu_id,
decode, time_diff/1000);
}
}
return update_kvm_event(event, vcpu, time_diff);
}

static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
struct perf_evsel *evsel,
struct perf_sample *sample)
{
/* Only kvm_entry records vcpu id. */
if (!thread->priv && kvm_entry_event(evsel)) {
struct vcpu_event_record *vcpu_record;
vcpu_record = zalloc(sizeof(*vcpu_record));
if (!vcpu_record) {
pr_err("%s: Not enough memory\n", __func__);
return NULL;
}

vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
thread->priv = vcpu_record;
}

return thread->priv;
}
static bool handle_kvm_event(struct perf_kvm_stat *kvm,
struct thread *thread,
struct perf_evsel *evsel,
struct perf_sample *sample)
{
struct vcpu_event_record *vcpu_record;
struct event_key key = {.key = INVALID_KEY};
vcpu_record = per_vcpu_record(thread, evsel, sample);
if (!vcpu_record)
return true;

/* only process events for vcpus user cares about */
if ((kvm->trace_vcpu != -1) &&
(kvm->trace_vcpu != vcpu_record->vcpu_id))
return true;
if (kvm->events_ops->is_begin_event(evsel, sample, &key))
return handle_begin_event(kvm, vcpu_record, &key, sample->time);
if (kvm->events_ops->is_end_event(evsel, sample, &key))
return handle_end_event(kvm, vcpu_record, &key, sample);
return true;
}
#define GET_EVENT_KEY(func, field) \
static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
{ \
if (vcpu == -1) \
return event->total.field; \
\
if (vcpu >= event->max_vcpu) \
return 0; \
\
return event->vcpu[vcpu].field; \
}
#define COMPARE_EVENT_KEY(func, field) \
GET_EVENT_KEY(func, field) \
static int compare_kvm_event_ ## func(struct kvm_event *one, \
struct kvm_event *two, int vcpu)\
{ \
return get_event_ ##func(one, vcpu) > \
get_event_ ##func(two, vcpu); \
}
GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);
GET_EVENT_KEY(max, stats.max);
GET_EVENT_KEY(min, stats.min);
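/*
 * For reference, GET_EVENT_KEY(time, time) expands to roughly:
 *
 *   static u64 get_event_time(struct kvm_event *event, int vcpu)
 *   {
 *           if (vcpu == -1)
 *                   return event->total.time;
 *           if (vcpu >= event->max_vcpu)
 *                   return 0;
 *           return event->vcpu[vcpu].time;
 *   }
 *
 * COMPARE_EVENT_KEY() additionally emits the matching
 * compare_kvm_event_<func>() used as the sort callback.
 */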
#define DEF_SORT_NAME_KEY(name, compare_key) \
{ #name, compare_kvm_event_ ## compare_key }
static struct kvm_event_key keys[] = {
DEF_SORT_NAME_KEY(sample, count),
DEF_SORT_NAME_KEY(time, mean),
{ NULL, NULL }
};
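/*
 * The names above are matched against the user-supplied sort key, so
 * an invocation along the lines of "perf kvm stat report --key time"
 * (exact flag spelling per the builtin's option table, not shown in
 * this excerpt) sorts by mean time, and "sample" sorts by count.
 */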
static bool select_key(struct perf_kvm_stat *kvm)
{
int i;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, kvm->sort_key)) {
kvm->compare = keys[i].key;
return true;
}
}

pr_err("Unknown compare key:%s\n", kvm->sort_key);
return false;
}
static void insert_to_result(struct rb_root *result, struct kvm_event *event,
key_cmp_fun bigger, int vcpu)
{
struct rb_node **rb = &result->rb_node;
struct rb_node *parent = NULL;
struct kvm_event *p;
while (*rb) {
p = container_of(*rb, struct kvm_event, rb);
parent = *rb;
if (bigger(event, p, vcpu))
rb = &(*rb)->rb_left;
else
rb = &(*rb)->rb_right;
}
rb_link_node(&event->rb, parent, rb);
rb_insert_color(&event->rb, result);
}
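/*
 * Note the inverted test in insert_to_result(): bigger() sends larger
 * events to the left, so rb_first() yields the largest entry and
 * pop_from_result() below hands back results in descending order.
 */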
static void
update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
{
int vcpu = kvm->trace_vcpu;

kvm->total_count += get_event_count(event, vcpu);
kvm->total_time += get_event_time(event, vcpu);
}
static bool event_is_valid(struct kvm_event *event, int vcpu)
{
return !!get_event_count(event, vcpu);
}
static void sort_result(struct perf_kvm_stat *kvm)
{
unsigned int i;
int vcpu = kvm->trace_vcpu;
struct kvm_event *event;

for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
if (event_is_valid(event, vcpu)) {
update_total_count(kvm, event);
insert_to_result(&kvm->result, event,
kvm->compare, vcpu);
}
}
}
}
/* returns left most element of result, and erase it */
static struct kvm_event *pop_from_result(struct rb_root *result)
{
struct rb_node *node = rb_first(result);

if (!node)
return NULL;

rb_erase(node, result);
return container_of(node, struct kvm_event, rb);
}
static void print_vcpu_info(struct perf_kvm_stat *kvm)
{
int vcpu = kvm->trace_vcpu;

pr_info("Analyze events for ");

if (kvm->live) {
if (kvm->opts.target.system_wide)
pr_info("all VMs, ");
else if (kvm->opts.target.pid)
pr_info("pid(s) %s, ", kvm->opts.target.pid);
else
pr_info("dazed and confused on what is monitored, ");
}
if (vcpu == -1)
pr_info("all VCPUs:\n\n");
else
pr_info("VCPU %d:\n\n", vcpu);
}
static void show_timeofday(void)
{
char date[64];
struct timeval tv;
struct tm ltime;
gettimeofday(&tv, NULL);
if (localtime_r(&tv.tv_sec, &ltime)) {
strftime(date, sizeof(date), "%H:%M:%S", &ltime);
pr_info("%s.%06ld", date, tv.tv_usec);
} else
pr_info("00:00:00.000000");
return;
}
static void print_result(struct perf_kvm_stat *kvm)
{
char decode[20];
struct kvm_event *event;
int vcpu = kvm->trace_vcpu;
if (kvm->live) {
puts(CONSOLE_CLEAR);
show_timeofday();
}
pr_info("%20s ", kvm->events_ops->name);
pr_info("%10s ", "Samples");
pr_info("%9s ", "Samples%");
pr_info("%9s ", "Time%");
pr_info("%10s ", "Min Time");
pr_info("%10s ", "Max Time");
pr_info("%16s ", "Avg time");
pr_info("\n\n");
while ((event = pop_from_result(&kvm->result))) {
u64 ecount, etime, max, min;

ecount = get_event_count(event, vcpu);
etime = get_event_time(event, vcpu);
max = get_event_max(event, vcpu);
min = get_event_min(event, vcpu);
kvm->events_ops->decode_key(kvm, &event->key, decode);
pr_info("%20s ", decode);
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
pr_info("%8" PRIu64 "us ", min / 1000);
pr_info("%8" PRIu64 "us ", max / 1000);
pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
kvm_event_rel_stddev(vcpu, event));
pr_info("\n");
}
pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
kvm->total_count, kvm->total_time / 1e3);
if (kvm->lost_events)
pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}
#ifdef HAVE_TIMERFD_SUPPORT
static int process_lost_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct machine *machine __maybe_unused)
{
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
kvm->lost_events++;
return 0;
}
#endif

static bool skip_sample(struct perf_kvm_stat *kvm,
struct perf_sample *sample)
{
if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
return true;
return false;
}
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine)
{
struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
if (skip_sample(kvm, sample))
return 0;
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
}
if (!handle_kvm_event(kvm, thread, evsel, sample))
return -1;

return 0;
}

static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
char buf[64], *cpuid;
int err, isa;
if (kvm->live) {
err = get_cpuid(buf, sizeof(buf));
if (err != 0) {
pr_err("Failed to look up CPU type (Intel or AMD)\n");
return err;
}
cpuid = buf;
} else
cpuid = kvm->session->header.env.cpuid;
if (strstr(cpuid, "Intel"))
isa = 1;
else if (strstr(cpuid, "AMD"))
isa = 0;
else {
pr_err("CPU %s is not supported.\n", cpuid);
return -ENOTSUP;
}
if (isa == 1) {
kvm->exit_reasons = vmx_exit_reasons;
kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
kvm->exit_reasons_isa = "VMX";
} else {
kvm->exit_reasons = svm_exit_reasons;
kvm->exit_reasons_size = ARRAY_SIZE(svm_exit_reasons);
kvm->exit_reasons_isa = "SVM";
}
return 0;
}
static bool verify_vcpu(int vcpu)
{
if (vcpu != -1 && vcpu < 0) {
pr_err("Invalid vcpu:%d.\n", vcpu);
return false;
}
return true;
}
#ifdef HAVE_TIMERFD_SUPPORT
/* keeping the max events to a modest level to keep
* the processing of samples per mmap smooth.
*/
#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
u64 *mmap_time)
{
union perf_event *event;
struct perf_sample sample;
s64 n = 0;
int err;
*mmap_time = ULLONG_MAX;
while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
if (err) {
perf_evlist__mmap_consume(kvm->evlist, idx);
pr_err("Failed to parse sample\n");
return -1;
}
err = perf_session_queue_event(kvm->session, event, &sample, 0);
/*
* FIXME: Here we can't consume the event, as perf_session_queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
*/
perf_evlist__mmap_consume(kvm->evlist, idx);
if (err) {
pr_err("Failed to enqueue sample: %d\n", err);
return -1;
}
/* save time stamp of our first sample for this mmap */
if (n == 0)
*mmap_time = sample.time;
/* limit events per mmap handled all at once */
n++;
if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
break;
}
return n;
}
static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
{
int i, err, throttled = 0;
s64 n, ntotal = 0;
u64 flush_time = ULLONG_MAX, mmap_time;
for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
if (n < 0)
return -1;
/* flush time is going to be the minimum of all the individual
* mmap times. Essentially, we flush all the samples queued up
* from the last pass under our minimal start time -- that leaves
* a very small race for samples to come in with a lower timestamp.
* The ioctl to return the perf_clock timestamp should close the
* race entirely.
*/
if (mmap_time < flush_time)
flush_time = mmap_time;
ntotal += n;
if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
throttled = 1;
}
/* flush queue after each round in which we processed events */
if (ntotal) {
kvm->session->ordered_samples.next_flush = flush_time;
err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
if (err) {
if (kvm->lost_events)
pr_info("\nLost events: %" PRIu64 "\n\n",
kvm->lost_events);
return err;
}
}
return throttled;
}
static volatile int done;
static void sig_handler(int sig __maybe_unused)
{
done = 1;
}
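/*
 * Live mode: arm a periodic timer that fires every display_time
 * seconds. The live event loop (not part of this excerpt) polls the
 * timerfd alongside the mmap descriptors and reprints the accumulated
 * stats on each tick.
 */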
static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
{
struct itimerspec new_value;
int rc = -1;
kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
if (kvm->timerfd < 0) {
pr_err("timerfd_create failed\n");
goto out;
}
new_value.it_value.tv_sec = kvm->display_time;
new_value.it_value.tv_nsec = 0;
new_value.it_interval.tv_sec = kvm->display_time;
new_value.it_interval.tv_nsec = 0;
if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
pr_err("timerfd_settime failed: %d\n", errno);
close(kvm->timerfd);
goto out;
}
rc = 0;
out: