NVIDIA CUDA clock.cu Source Code
The listing below is two files: the kernel in clock_kernel.cu, followed by the host code clock.cu that includes it.
// VC++ 2010 + CUDA 4.1
#ifndef _CLOCK_KERNEL_H_
#define _CLOCK_KERNEL_H_
// This kernel computes a standard parallel reduction and evaluates the
// time it takes to do that for each block. The timing results are stored
// in device memory.
__global__ static void timedReduction(const float *input, float *output, clock_t *timer)
{
    // __shared__ float shared[2 * blockDim.x];
    extern __shared__ float shared[];

    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (tid == 0) timer[bid] = clock();

    // Copy input.
    shared[tid] = input[tid];
    shared[tid + blockDim.x] = input[tid + blockDim.x];

    // Perform reduction to find minimum.
    for (int d = blockDim.x; d > 0; d /= 2)
    {
        __syncthreads();

        if (tid < d)
        {
            float f0 = shared[tid];
            float f1 = shared[tid + d];

            if (f1 < f0)
            {
                shared[tid] = f1;
            }
        }
    }

    // Write result.
    if (tid == 0) output[bid] = shared[0];

    __syncthreads();

    if (tid == 0) timer[bid + gridDim.x] = clock();
}
#endif // _CLOCK_KERNEL_H_
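
On the device, clock() reads a 32-bit per-multiprocessor counter, which wraps every few seconds at typical shader clocks, so stamps taken far apart can yield a negative difference. Devices of compute capability 2.0 and later expose the 64-bit clock64() intrinsic, which sidesteps this. Below is a minimal sketch of the same kernel using clock64(); the name timedReduction64 and the long long timer buffer are illustrative, not part of the SDK sample.

// Sketch: the same per-block timing with the 64-bit counter (sm_20 and later).
__global__ static void timedReduction64(const float *input, float *output, long long int *timer)
{
    extern __shared__ float shared[];
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    if (tid == 0) timer[bid] = clock64();              // 64-bit start stamp

    shared[tid] = input[tid];
    shared[tid + blockDim.x] = input[tid + blockDim.x];

    for (int d = blockDim.x; d > 0; d /= 2)
    {
        __syncthreads();
        if (tid < d && shared[tid + d] < shared[tid])
            shared[tid] = shared[tid + d];
    }

    if (tid == 0) output[bid] = shared[0];
    __syncthreads();
    if (tid == 0) timer[bid + gridDim.x] = clock64();  // 64-bit end stamp
}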
#include <stdio.h>
#include <stdlib.h>
#include <shrQATest.h>
#include <cutil_inline.h>

#include "clock_kernel.cu"
// This example shows how to use the clock function to measure the performance of
// a kernel accurately.
//
// Blocks are executed in parallel and out of order. Since there's no synchronization
// mechanism between blocks, we measure the clock once for each block. The clock
// samples are written to device memory.
#define NUM_BLOCKS 64
#define NUM_THREADS 256
// It's interesting to change the number of blocks and the number of threads to
// understand how to keep the hardware busy.
//
// Here are some numbers I get on my G80:
// blocks - clocks
// 1 - 3096
// 8 - 3232
// 16 - 3364
// 32 - 4615
// 64 - 9981
//
// With fewer than 16 blocks some of the multiprocessors of the device are idle. With
// more than 16 you are using all the multiprocessors, but there's only one block per
// multiprocessor, and that doesn't allow you to hide the latency of the memory. With
// more than 32 blocks the speed scales linearly.
int main(int argc, char **argv)
{
    shrQAStart(argc, argv);

    // Use the command-line specified CUDA device; otherwise use the device with the highest Gflops/s.
    if (cutCheckCmdLineFlag(argc, (const char **)argv, "device")) {
        int devID = cutilDeviceInit(argc, argv);
        if (devID < 0) {
            printf("No CUDA capable devices found, exiting...\n");
            shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
        }
    } else {
        cudaSetDevice(cutGetMaxGflopsDeviceId());
    }

    float *dinput = NULL;
    float *doutput = NULL;
    clock_t *dtimer = NULL;

    clock_t timer[NUM_BLOCKS * 2];
    float input[NUM_THREADS * 2];

    for (int i = 0; i < NUM_THREADS * 2; i++)
    {
        input[i] = (float)i;
    }

    cutilSafeCall(cudaMalloc((void **)&dinput, sizeof(float) * NUM_THREADS * 2));
    cutilSafeCall(cudaMalloc((void **)&doutput, sizeof(float) * NUM_BLOCKS));
    cutilSafeCall(cudaMalloc((void **)&dtimer, sizeof(clock_t) * NUM_BLOCKS * 2));

    cutilSafeCall(cudaMemcpy(dinput, input, sizeof(float) * NUM_THREADS * 2, cudaMemcpyHostToDevice));

    timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(dinput, doutput, dtimer);

    //cutilSafeCall(cudaMemcpy(output, doutput, sizeof(float) * NUM_BLOCKS, cudaMemcpyDeviceToHost));
    cutilSafeCall(cudaMemcpy(timer, dtimer, sizeof(clock_t) * NUM_BLOCKS * 2, cudaMemcpyDeviceToHost));

    cutilSafeCall(cudaFree(dinput));
    cutilSafeCall(cudaFree(doutput));
    cutilSafeCall(cudaFree(dtimer));
    // Compute the difference between the last block end and the first block start.
    clock_t minStart = timer[0];
    clock_t maxEnd = timer[NUM_BLOCKS];

    for (int i = 1; i < NUM_BLOCKS; i++)
    {
        minStart = timer[i] < minStart ? timer[i] : minStart;
        maxEnd = timer[NUM_BLOCKS + i] > maxEnd ? timer[NUM_BLOCKS + i] : maxEnd;
    }

    printf("time = %ld\n", (long)(maxEnd - minStart));
    cutilDeviceReset();

    // This test always passes.
    shrQAFinishExit(argc, (const char **)argv, QA_PASSED);
}
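
In-kernel clock() stamps measure SM cycles per block; for the wall-clock time of a whole launch, CUDA events are the usual host-side alternative. A self-contained sketch follows; busyKernel and its launch configuration are illustrative placeholders, not from the SDK sample.

// events_timing.cu -- minimal host-side kernel timing with CUDA events.
#include <stdio.h>
#include <cuda_runtime.h>

__global__ void busyKernel(float *out)
{
    // Trivial workload so the events have something to bracket.
    out[threadIdx.x] = (float)threadIdx.x * 2.0f;
}

int main()
{
    float *d = NULL;
    cudaMalloc((void **)&d, 256 * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);   // enqueue start marker
    busyKernel<<<1, 256>>>(d);
    cudaEventRecord(stop, 0);    // enqueue stop marker
    cudaEventSynchronize(stop);  // wait until the kernel has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time = %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    return 0;
}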