Linux中计算特定CPU使用率案例详解
linux中计算特定cpu使用率 需求解决方案拓展参考
需求
在linux中可以通过top指令查看某一进程占用的cpu情况,也可以查看某一个cpu使用率情况(先top指令,然后按数字“1”键即可显示每一个cpu的使用情况),如下图:
而我们的需求是:如何得到一个cpu的占用率呢?
解决方案
1. 背景知识
在/proc/stat中可以查看每一个cpu的使用情况的,如下图:
其中cpu(0/1/2/…)后面的那十个数字含义如下:
/proc/stat — kernel/system statistics (varies with architecture). Common entries include:

    cpu  4705 356 584 3699 23 23 0 0 0 0
    cpu0 1393280 32966 572056 13343292 6130 0 17875 0 23933 0

The amount of time, measured in units of USER_HZ (1/100ths of a second on most architectures; use sysconf(_SC_CLK_TCK) to obtain the right value), that the system ("cpu" line) or the specific CPU ("cpuN" line) spent in various states:

user       (1) Time spent in user mode.
nice       (2) Time spent in user mode with low priority (nice).
system     (3) Time spent in system mode.
idle       (4) Time spent in the idle task. This value should be USER_HZ times the second entry in the /proc/uptime pseudo-file.
iowait     (since Linux 2.5.41) (5) Time waiting for I/O to complete. This value is not reliable, for the following reasons:
           1. The CPU will not wait for I/O to complete; iowait is the time that a task is waiting for I/O to complete. When a CPU goes into idle state for outstanding task I/O, another task will be scheduled on this CPU.
           2. On a multi-core CPU, the task waiting for I/O to complete is not running on any CPU, so the iowait of each CPU is difficult to calculate.
           3. The value in this field may decrease in certain conditions.
irq        (since Linux 2.6.0-test4) (6) Time servicing interrupts.
softirq    (since Linux 2.6.0-test4) (7) Time servicing softirqs.
steal      (since Linux 2.6.11) (8) Stolen time, which is the time spent in other operating systems when running in a virtualized environment.
guest      (since Linux 2.6.24) (9) Time spent running a virtual CPU for guest operating systems under the control of the Linux kernel.
guest_nice (since Linux 2.6.33) (10) Time spent running a niced guest (virtual CPU for guest operating systems under the control of the Linux kernel).
2.计算具体cpu使用率
有了上面的背景知识,接下来我们就可以计算具体cpu的使用情况了。具体计算方式如下:
total cpu time since boot       = user + nice + system + idle + iowait + irq + softirq + steal
total cpu idle time since boot  = idle + iowait
total cpu usage time since boot = total cpu time since boot - total cpu idle time since boot
total cpu percentage            = total cpu usage time since boot / total cpu time since boot * 100%
有了上面的计算公式,计算某一cpu使用率或者系统总的cpu占用率也就是不难了。
示例:计算系统整体cpu占用情况
首先从/proc/stat中获取 t1时刻系统总体的user、nice、system、idle、iowait、irq、softirq、steal、guest、guest_nice的值,得到此时total cpu time since boot(记为total1)和 total cpu idle time since boot(记为idle1)。
其次,从/proc/stat中获取t2时刻系统总的total cpu time since boot(记为total2)和total cpu idle time since boot(记为idle2)。(方法同上一步)
最后,计算t2与t1之间系统总的cpu使用情况。也就是:
cpu percentage between t1 and t2 = ((total2-total1)-(idle2-idle1))/(total2-total1)* 100%
其中, ((total2-total1)-(idle2-idle1))实际上就是t1与t2时刻之间系统cpu被占用的时间(总时间 - 空闲时间)。
下面是一段计算时间段内cpu被占用情况的脚本:
#!/bin/bash
# By Paul Colby (http://colby.id.au), no rights reserved ;)
#
# Prints the overall CPU usage percentage once per second, computed from
# the delta of the aggregate "cpu" line in /proc/stat between two samples.
# NOTE: the first printed value is meaningless (it compares against the
# initial prev_total=0 / prev_idle=0 baseline, i.e. usage since boot).

prev_total=0
prev_idle=0

while true; do
  # Get the total CPU statistics, discarding the 'cpu ' prefix.
  cpu=(`sed -n 's/^cpu\s//p' /proc/stat`)
  idle=${cpu[3]} # Just the idle CPU time (4th field).

  # Calculate the total CPU time (sum of all fields).
  total=0
  for value in "${cpu[@]}"; do
    let "total=$total+$value"
  done

  # Calculate the CPU usage since we last checked.
  # The *1000 ... +5)/10 dance rounds to the nearest integer percent.
  let "diff_idle=$idle-$prev_idle"
  let "diff_total=$total-$prev_total"
  let "diff_usage=(1000*($diff_total-$diff_idle)/$diff_total+5)/10"
  echo -en "\rcpu: $diff_usage% \b\b"

  # Remember the total and idle CPU times for the next check.
  prev_total="$total"
  prev_idle="$idle"

  # Wait before checking again.
  sleep 1
done
拓展
在内核中,关于/proc/stat中文件的实现函数如下:
附注:内核版本3.14.69,文件为 /fs/proc/stat.c #include <linux/cpumask.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/irqnr.h> #include <asm/cputime.h> #include <linux/tick.h> #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 #endif #ifndef arch_irq_stat #define arch_irq_stat() 0 #endif #ifdef arch_idle_time static cputime64_t get_idle_time(int cpu) { cputime64_t idle; idle = kcpustat_cpu(cpu).cpustat[cputime_idle]; if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) idle += arch_idle_time(cpu); return idle; } static cputime64_t get_iowait_time(int cpu) { cputime64_t iowait; iowait = kcpustat_cpu(cpu).cpustat[cputime_iowait]; if (cpu_online(cpu) && nr_iowait_cpu(cpu)) iowait += arch_idle_time(cpu); return iowait; } #else static u64 get_idle_time(int cpu) { u64 idle, idle_time = -1ull; if (cpu_online(cpu)) idle_time = get_cpu_idle_time_us(cpu, null); if (idle_time == -1ull) /* !no_hz or cpu offline so we can rely on cpustat.idle */ idle = kcpustat_cpu(cpu).cpustat[cputime_idle]; else idle = usecs_to_cputime64(idle_time); return idle; } static u64 get_iowait_time(int cpu) { u64 iowait, iowait_time = -1ull; if (cpu_online(cpu)) iowait_time = get_cpu_iowait_time_us(cpu, null); if (iowait_time == -1ull) /* !no_hz or cpu offline so we can rely on cpustat.iowait */ iowait = kcpustat_cpu(cpu).cpustat[cputime_iowait]; else iowait = usecs_to_cputime64(iowait_time); return iowait; } #endif static int show_stat(struct seq_file *p, void *v) { int i, j; unsigned long jif; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[nr_softirqs] = {0}; struct timespec boottime; user = nice = system = idle = iowait = irq = softirq = steal = 0; guest = guest_nice = 0; getboottime(&boottime); jif = 
boottime.tv_sec; for_each_possible_cpu(i) { user += kcpustat_cpu(i).cpustat[cputime_user]; nice += kcpustat_cpu(i).cpustat[cputime_nice]; system += kcpustat_cpu(i).cpustat[cputime_system]; idle += get_idle_time(i); iowait += get_iowait_time(i); irq += kcpustat_cpu(i).cpustat[cputime_irq]; softirq += kcpustat_cpu(i).cpustat[cputime_softirq]; steal += kcpustat_cpu(i).cpustat[cputime_steal]; guest += kcpustat_cpu(i).cpustat[cputime_guest]; guest_nice += kcpustat_cpu(i).cpustat[cputime_guest_nice]; sum += kstat_cpu_irqs_sum(i); sum += arch_irq_stat_cpu(i); for (j = 0; j < nr_softirqs; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); per_softirq_sums[j] += softirq_stat; sum_softirq += softirq_stat; } } sum += arch_irq_stat(); seq_puts(p, "cpu "); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); for_each_online_cpu(i) { /* copy values here to work around gcc-2.95.3, gcc-2.96 */ user = kcpustat_cpu(i).cpustat[cputime_user]; nice = kcpustat_cpu(i).cpustat[cputime_nice]; system = kcpustat_cpu(i).cpustat[cputime_system]; idle = get_idle_time(i); iowait = get_iowait_time(i); irq = kcpustat_cpu(i).cpustat[cputime_irq]; softirq = kcpustat_cpu(i).cpustat[cputime_softirq]; steal = kcpustat_cpu(i).cpustat[cputime_steal]; guest = kcpustat_cpu(i).cpustat[cputime_guest]; guest_nice = kcpustat_cpu(i).cpustat[cputime_guest_nice]; seq_printf(p, "cpu%d", i); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); 
seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); seq_putc(p, '\n'); } seq_printf(p, "intr %llu", (unsigned long long)sum); /* sum again ? it could be updated? */ for_each_irq_nr(j) seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j)); seq_printf(p, "\nctxt %llu\n" "btime %lu\n" "processes %lu\n" "procs_running %lu\n" "procs_blocked %lu\n", nr_context_switches(), (unsigned long)jif, total_forks, nr_running(), nr_iowait()); seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); for (i = 0; i < nr_softirqs; i++) seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); seq_putc(p, '\n'); return 0; } static int stat_open(struct inode *inode, struct file *file) { size_t size = 1024 + 128 * num_possible_cpus(); char *buf; struct seq_file *m; int res; /* minimum size to display an interrupt count : 2 bytes */ size += 2 * nr_irqs; /* don't ask for more than the kmalloc() max size */ if (size > kmalloc_max_size) size = kmalloc_max_size; buf = kmalloc(size, gfp_kernel); if (!buf) return -enomem; res = single_open(file, show_stat, null); if (!res) { m = file->private_data; m->buf = buf; m->size = ksize(buf); } else kfree(buf); return res; } static const struct file_operations proc_stat_operations = { .open = stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_stat_init(void) { proc_create("stat", 0, null, &proc_stat_operations); return 0; } fs_initcall(proc_stat_init);
参考
http://man7.org/linux/man-pages/man5/proc.5.html
https://github.com/pcolby/scripts/blob/master/cpu.sh
https://elixir.bootlin.com/linux/v3.14.69/source/fs/proc/stat.c
到此这篇关于linux中计算特定cpu使用率案例详解的文章就介绍到这了,更多相关linux中计算特定cpu使用率内容请搜索以前的文章或继续浏览下面的相关文章,希望大家以后多多支持!