serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
1 | #include "includes.h"
// Element-wise product: d_z[i] = d_x[i] * d_y[i] for i in [0, N).
// Launch with a 1D grid covering at least N threads; surplus threads
// exit through the bounds guard.
__global__ void vecProductKernel(float *d_z, const float *d_x, const float *d_y, unsigned int N)
{
    // unsigned index: matches N's type and avoids the original
    // signed/unsigned comparison (int idx vs unsigned N).
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        d_z[idx] = d_x[idx] * d_y[idx];
    }
} |
2 | #include "includes.h"
// STREAM "triad" kernel: c[i] = a[i] + scalar * b[i] over len elements.
// Grid-stride loop, so any launch configuration covers the full range.
__global__ void STREAM_Triad_double(double *a, double *b, double *c, double scalar, size_t len)
{
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = threadIdx.x + blockIdx.x * (size_t)blockDim.x; i < len; i += stride) {
        c[i] = a[i] + scalar * b[i];
    }
} |
3 | #include <iostream>
#include "sys/time.h"
using namespace std;
double timeInSeconds (timeval& starttime, timeval& stopstime) {
return 1e-6*(1e6*(stopstime.tv_sec - starttime.tv_sec) + (stopstime.tv_usec - starttime.tv_usec));
}
__device__ double* dev_vector1 = 0;
__device__ double* dev_vector2 = 0;
__device__ dou... |
4 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
*****************************************************************... |
5 | ///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "cublas_v2.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor... |
6 | // Each thread calculates fitness for one individual
// Result: vector of fitness
extern "C"
__global__ void fitness_kernel(int populationCnt, int *population,
int pointsCnt, float *pointsX, float *pointsY, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < populationCnt)
{
... |
7 | #include "cuda_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// pointers[index] = (int *)malloc(segment_size);
pointers[index] = (in... |
8 | #include <algorithm>
#include <iostream>
#include <vector>
std::vector<double> add(std::vector<double> inarr1, std::vector<double> inarr2);
void test_integration()
{
constexpr size_t arr_size = 2 << 24;
std::cout << "Initializing test arrays...\n";
std::vector<double> arr1(arr_size);
std::vector<doub... |
9 | #include "Output_Layer_GPU_Kernels.cuh"
__constant__ float anchors_416[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 };
__device__ float Sigmoid(float x)
{
float expValue = exp((double)-x);
float result = 1 / (1 + expValue);
return result;
}
__global__ void XY_BoundingBox_Coordinates... |
10 | #include <stdio.h>
#include <cuda_runtime.h>
#include <assert.h>
int main(int argc, char **argv){
float *a_h, *b_h; // Host data
float *a_d, *b_d; // Device data
int N = 14, nBytes, i;
printf("Start allocating\n");
nBytes = N * sizeof(float);
printf("Allocating in Host\n");
a_h = (flo... |
11 | #include <cuda.h>
#define KERNEL_SIZE 3
#define BLOCK_SIZE 512
typedef signed int pixel_channel;
typedef unsigned long resolution;
__constant__ pixel_channel kernel_cuda[KERNEL_SIZE * KERNEL_SIZE];
pixel_channel kernel_host[KERNEL_SIZE * KERNEL_SIZE] = { -1, -1, -1,
-1, 9, -1,
-1, -1, -1 };... |
12 | #include <stdio.h>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
//Device Memory allocations
cudaError_t err = cudaMalloc((void**)&dev_c, sizeof(&dev_c));
if(err != cudaSuccess) {
printf("The error is %s\n", cudaGetErrorString(err));
... |
13 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <memory>
/* Query the CUDA-capable GPU devices on this machine (comment restored from mojibake-damaged Chinese). */
int main(void) {
int device_count = 0;
cudaGetDeviceCount(&device_count);
//Get the number of devices that support CUDA (restored from mojibake-damaged Chinese)
if (device_count ==0)
{
printf("There are no available device(s) that support CUDA\n");
... |
14 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void computeLogicGates(char* d_input, char* d_output, int size) {
// calculate the index of the t... |
15 | #include "Matrix.cuh"
#include <cstring>
#include <fstream>
#include <ctime>
#include <device_functions.h>
#ifdef __CUDACC__
#define cuda_SYNCTHREADS() __syncthreads()
#else
#define cuda_SYNCTHREADS()
#endif
#define Zero ZeroCPU
#define PRINT_LOG false
//#define TARGET_RESIDUE ((double)1.0e-9);
const double TARGET_R... |
16 | #include "includes.h"
__global__ void multiply_by_itself_training_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
int elem_id = blockDim.x * blockIdx.x + threadIdx.x;
if (elem_id < elem_count)
{
float4 val = input_buf[elem_id];
val.x *= val.x;
val.y *= val.y;
val.z *=... |
17 | #include <algorithm>
#include <iostream>
#include <vector>
typedef unsigned long long data_t;
static inline void check(cudaError_t err, const char* context) {
if (err != cudaSuccess) {
std::cerr << "CUDA error: " << context << ": "
<< cudaGetErrorString(err) << std::endl;
std::exit(... |
18 | #include <iostream>
using namespace std;
#define CUDA_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
... |
19 | extern "C"
__global__ void cuAdd(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i];
}
}
extern "C"
__global__ void cuMult(int n, float *a, float *b, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
... |
20 | #include<bits/stdc++.h>
using namespace std;
__global__ void vec_add(int N, int *A, int *B, int *C){
int i = threadIdx.x + blockIdx.x * blockDim.x;
// assert( i<N );
if(i < N) C[i] = A[i] + B[i];
}
int main(int argc, char *argv[]){
srand(0);
int N = 10000, block_size = 256;
if(argc>1) N = stoi(ar... |
21 | /*用gpu实现2个矩阵之间的乘法*/
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
using namespace std;
#define cols 1024
#define rows 1024
__global__ void multiply(float**Ad,float**Bd,float**Cd)
{
int x = blockDim.x*blockIdx.x+threadIdx.x;
int y = blockDim.y*blockIdx.y+thread... |
22 | #include <stdio.h>
// Device-side demo kernel: every thread prints the same message.
// Device printf output is buffered and flushed at the next host sync.
__global__ void firstParallel()
{
printf("This is running in parallel.\n");
}
int main()
{
    // 5 blocks x 5 threads = 25 printed lines.
    firstParallel<<<5, 5>>>();

    // Kernel launches are asynchronous and report errors lazily: check
    // the launch itself, then the execution at synchronization time.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
23 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void conv2(float *A, float *kernel,int inputSize, int depth, int kernelSize , int stride, int pad, float *B, int outputSize) {
// 计算元素output(i,j)的值 一次卷积运算
... |
24 | #include "includes.h"
__global__ void __stratifycounts(double *strata, int n, double *a, unsigned int *bi) {
__shared__ unsigned int ic[SNDVALS][SNDGRPS];
__shared__ double ss[SNDVALS];
int istart = (int)(((long long)blockIdx.x) * n / gridDim.x);
int iend = (int)(((long long)(blockIdx.x+1)) * n / gridDim.x);
int bibas... |
25 | //#include <hayai/hayai.hpp>
//
//#include "btree.cuh"
//
//#include "concurrent-xfasttrie-fixture.cu"
//
//using BTREE = gpu::BTree<key_type, mapped_type>;
//using BTreeInsertionFixture = XTrieInsertionFixture<BTREE, Structure::BTREE>;
//using BTreeGetThreadFixture = XTrieGetThreadFixture<BTREE, Structure::BTREE>;
//u... |
26 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define MAX 65535
#define imin(a,b) (a<b?a:b)
const int arr_size =8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32,(arr_size +threadsPerBlock -1)/threadsPerBlock);
__global__ void kernel(float*... |
27 | #include <stdio.h>
#include <stdlib.h>
#define N 5
#define BR() printf("\n")
#define BRS(str) printf("%s\n",str)
typedef struct {
int top;
int* data;
int stack_size;
}FIFO;
void exec();
void initialize_array(int*);
void print_array(int*);
int main(int argc, char const *argv[]) {
exec();
return 0;
}
// __de... |
28 | // nvcc -arch sm_21 -o test -run --keep --ptxas-options="-v" test.cu
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
// TODO(review): empty stub — the kernel currently performs no work;
// the transpose of Input into Output is not implemented.
__global__ void transpose (int* Input, int* Output) {
}
|
29 | #include <stdio.h>
#include <math.h>
#include <stdlib.h>
//Note that any functions that want to be called from the kernel must be preceeded with __device__
//Function we are integrating
__device__ float myFunction(float x){
return pow(x,4);
}
//Trapezoidal rule calculation
__device__ float trapezoidal(float a, flo... |
30 | #include "cuda_runtime.h"
#include "stdio.h"
#include "stdlib.h"
#include "string.h"
#include "time.h"
#define A_w 50
#define A_h 50
#define B_w 32
#define B_h 32
typedef struct{
int width;
int height;
float * elements;
}Matrix;
// #define
void rightKronecker1(Matrix A, Matrix B, Matrix C){
for(int c_row=0; c_... |
31 | /* Block size X: 32 */
__global__ void fct_ale_b2(const int maxLevels, const double dt, const double fluxEpsilon, const int * __restrict__ nLevels, const double * __restrict__ area_inv, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, double * __restrict__ fct_plus, double * __restrict_... |
32 | #include "includes.h"
using namespace std;
// Writes `value` into data[idx]. Only thread 0 of each block performs
// the store, so multiple blocks simply repeat the same write.
__global__ void setValue(float *data, int idx, float value) {
    if (threadIdx.x != 0) {
        return;
    }
    data[idx] = value;
} |
33 | #include "includes.h"
// Logistic function 1 / (1 + e^-x), using the fast-math __expf intrinsic.
__device__ float sigmoid(float x) {
    const float e = __expf(-x);
    return 1.0f / (1 + e);
}
// Applies the logistic sigmoid element-wise: A = sigmoid(Z), treating the
// Z_x_dim x Z_y_dim matrix as a flat array with one thread per element.
__global__ void sigmoidActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
    const int total = Z_x_dim * Z_y_dim;
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < total) {
        A[i] = sigmoid(Z[i]);
    }
} |
34 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
// #define NUM_PARTICLES 10000
// #define NUM_ITERATIONS 10000
// int TPB = 16;
#define SEED 10
#define EPSILON 1e-5
typedef struct {
float3 position;
float3 velocity;
} Particle;
// Deterministically generates a "random" float,... |
35 | __device__ void rot_x(float3 *vec, float angle)
{
float tmp;
tmp = vec->y;
vec->y = tmp * cosf(angle) + vec->z * -sinf(angle);
vec->z = tmp * sinf(angle) + vec->z * cosf(angle);
}
__device__ void rot_y(float3 *vec, float angle)
{
float tmp;
tmp = vec->x;
vec->x = tmp * cosf(angle) + vec->z * sinf(... |
36 | #include "includes.h"
__global__ void cuSetupSincKernel_kernel(float *r_filter_, const int i_filtercoef_, const float r_soff_, const float r_wgthgt_, const int i_weight_, const float r_soff_inverse_, const float r_beta_, const float r_decfactor_inverse_, const float r_relfiltlen_inverse_)
{
int i = threadIdx.x + blockD... |
37 | #include "includes.h"
using namespace std;
struct compressed_sparse_column {
int* data;
int* row;
int* column;
int* index_column;
int* index_row_start;
int* index_row_end;
};
struct graph {
compressed_sparse_column* dataset;
bool* roots;
bool* leaves;
bool* singletons;
int vertices;
int edges;
};
__global__ void pr... |
38 | #include "includes.h"
__global__ void gpu_transpo_kernel_naive(u_char *Source, u_char *Resultat, unsigned width, unsigned height){
int j = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.y*blockDim.y + threadIdx.y;
if ((i<0)||(i>=height)||(j<0)||(j>=width)) {}
else {
Resultat[j*height + i] = Source[i*width + j]... |
39 | #include <cstdio>
#include <cstdlib>
#include <time.h>
#include "cuda_timer.cuh"
#define SafeTimerCall(err) __safeTimerCall(err, __FILE__, __LINE__)
inline void __safeTimerCall(cudaError err, const char *file, const int line) {
#pragma warning(push)
#pragma warning(disable: 4127) Prevent warning on do-while(0);
do... |
40 | #include "cuda.h"
typedef long long int64;
__global__ void ReceiveFun(double *out, const double*vx, const double*vy,
const double*sigmaxx, const double*sigmayy, const double*sigmaxy, int64 nt,
const int64 *rcvi, const int64 *rcvj, const int64 *rcvtype, int64 nrcv, int64 NX, int64 NY){
int i = blockIdx.x*blockD... |
41 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define X_SIZE 10240
#define Y_SIZE 16384
#define ARRAY_SIZE (X_SIZE*Y_SIZE)
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 32
#define TIMESTEPS 1000
const char* input_file_na... |
42 | #include "includes.h"
// Element-wise addition out[i] = in1[i] + in2[i] over `size` elements,
// using a grid-stride loop so any launch configuration covers the range.
__global__ void elementwise_1D_1D_add(float* in1, float* in2, float* out, int size) {
    int stride = gridDim.x * blockDim.x;
    // The loop condition already bounds tid < size; the original repeated
    // the same check inside the body, which was redundant.
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x; tid < size; tid += stride) {
        out[tid] = in1[tid] + in2[tid];
    }
} |
43 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda.h"
//device function
__global__ void kernelAddVectors(int N, double *a, double *b, double *c) {
int threadid = threadIdx.x; //thread number
int blockid = blockIdx.x; //block number
int Nblock = blockDim.x; //number of ... |
44 | #include <stdio.h>
/*
* ホスト上で配列値を初期化します。
*/
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* GPU 上で要素を並列で 2 倍にします。
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N)
{
a[i] *= 2;
}
}
/*
* ホスト上で... |
45 | /*
============================================================================
Name : LAB3.cu
Author : Kineibe
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream... |
46 | // includes, system
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, kernels
#include "vector_reduction_kernel.cu"
// For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements.
#define NUM_ELEMEN... |
47 | /*
* Kernel for calulating the element-wise product of two matrices
* m, n --> dimensions of matrices A, B, C
*/
extern "C" {
__global__ void hadamard(int m, int n, double *A, int lda, double *B, int ldb, double *C, int ldc)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + t... |
48 | #include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
extern "C" __global__
void memcpy_kernel(unsigned char* __restrict__ output, const unsigned char* __restrict__ input){
output += (blockIdx.x<<13)|(threadIdx.x<<2);
input += (blockIdx.x<... |
49 | # include<stdio.h>
// Device-side hello-world: each launched thread prints one line.
__global__ void mykernel()
{
printf("hello world for GPU\n");
}
int main()
{
    // 1 block of 10 threads -> 10 printed lines.
    mykernel<<<1, 10>>>();
    // Device printf is flushed when the host synchronizes; also surface
    // any launch/execution error instead of silently returning success.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
50 | #include "cuda_runtime.h" // A small gpu volumetric path tracer in 200 lines
#include "device_launch_parameters.h" // Jerry Guo (c) CGV TU Delft
#include "math_constants.h" // Based on smallvpt and cu-smallpt
#include "curand_kernel.h" // Compile: nvcc
#include <stdlib.h> ... |
51 | #include "includes.h"
// Vector sum: d_C[i] = d_A[i] + d_B[i] for the first `size` elements.
// One thread per element; out-of-range threads return immediately.
__global__ void addVectors( float *d_A, float *d_B, float *d_C, int size)
{
    const int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= size)
        return;
    d_C[gid] = d_A[gid] + d_B[gid];
} |
52 | extern "C"
__global__ void calcDir(// Dots props
float* pX,
float* pY,
float* pZ,
//Tree specs
// per Block
int* dotIndexes,
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
float* a... |
53 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
__global__ void vecAdd(float* h_a, float* h_b, float* h_c, int n)
{
int id = blockIdx.x*blockDim.x+threadIdx.x;
//check if it is in bound
if(id<n)
h_c[id] = h_a[id]+ h_b[id];
}
int main(int argc, char* argv[])
{
//size of vectors
int n= 1000;
float *h_... |
54 | #include "includes.h"
__global__ void vectorReduce(const float *global_input_data, float *global_output_data, const int numElements)
{
__shared__ float sdata[10];
__shared__ int sindice[10];
int tid = threadIdx.x;
int i = blockIdx.x * (blockDim.x ) + threadIdx.x;
sdata[tid] = global_input_data[i];
sindice[tid] = tid;... |
55 | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__ void convertToFloat(float *d_out, int *d_in){
d_out[threadIdx.x] = (float)d_in[threadIdx.x];
}
double time_diff(struct timeval x , struct timeval y){
double x_ms , y_ms , diff;
x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
y_ms = (do... |
56 | #include <stdio.h>
// Number of threads
#define NT 1024
// Structure to hold the 2D Points
typedef struct
{
double x;
double y;
}
point;
// Structure to store the metric center result
typedef struct
{
double distance;
int pointIndex;
}
result;
// Function to calculate distance between two points
__device__ doub... |
57 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
__global__ void vAdd(int* A, int* B, int* C, int num_elements){
//Posicion del thread
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < num_elements){
C[i] = A[i] + B[i];
}
}
void sumarVectores(int* A, int* B, int* C, in... |
58 | #include <stdio.h>
__global__
void saxpy(int n, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = x[i] + y[i];
}
void cuda_array_culc_add_float(float* x, float* y, int32_t N)
{
float *d_x, *d_y;
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
cuda... |
59 | //
// Created by songzeceng on 2020/11/26.
//
#include "cuda_runtime.h"
#include "stdio.h"
#define N 64
#define TPB 32
float scale(int i, int n) {
return ((float ) i) / (n - 1);
}
__device__ float distance(float x1, float x2) {
return sqrt((x2 - x1) * (x2 - x1));
}
__global__ void distanceKernel(float *d_o... |
60 | #include <iostream>
#include <ctime>
__global__ void matMulKernel(float* matA, float* matB, float* matC, int rows, int cols)
{
dim3 gIdx;
gIdx.y = blockIdx.y * blockDim.y + threadIdx.y;
gIdx.x = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if(gIdx.x < cols && gIdx.y < rows)
{
... |
61 | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and relate... |
extern "C"
{
// Accumulates z[i] += t[i] * x[i] * (1 - x[i]) over lengthX elements.
__global__ void tx1mx_32(const int lengthX, const float *t, const float *x, float *z)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < lengthX)
    {
        // 1.0f keeps the arithmetic in single precision; the original
        // double literal 1.0 forced a float->double->float round trip.
        z[i] += t[i] * x[i] * (1.0f - x[i]);
    }
}
} |
63 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>
#include <ctype.h>
struct ThreadStruct {
float *a, *b, *c;
int size, elapsed_time;
};
__global__ void vectorMultGPU(float *a, float *b, float *c, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < n)... |
64 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
__global__ void Rotate(uchar4 *ptr, unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size, float a,
unsigned long col, unsigned long row)
{
int x = th... |
65 | inline __device__ float operator*(float3 a, float3 b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
inline __device__ float dot(float3 a, float3 b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
inline __device__ float3 operator*(float3 a, float b) {
return make_float3(a.x * b, a.y * b, a.z * b);
}
inline __dev... |
66 | /***************************************************************************//**
* \file LHS1.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the left hand side for the intermediate velocity solve
*/
#include "LHS1.h"
namespace kernels
{
__global__
void LHS1_mid_luo_X(int *row... |
67 | #include <iostream>
#include <math.h>
#include <time.h>
#include <stdlib.h>
#include <random>
#include <vector>
#include <chrono>
#include <deque>
#include <algorithm>
#include <iterator>
#include <curand.h>
#include <curand_kernel.h>
#define BLOCK_SIZE 1024
__global__ void min_reduce(int *arr, const int n) {
int... |
68 | //put C:/Users/molly/Desktop/289Q/project/main.cu
//nvcc -std=c++11 main.cu
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
#include <cooperative_groups.h>
#include <cooperative_groups.h>
// includes, project
#include <cuda.h>
#include <cuda_runtime.h... |
69 | #include "includes.h"
// Element-wise product result[i] = x[i] * y[i] over a 2D launch:
// blockIdx.y selects a chunk of rowsz elements, blockIdx.x/threadIdx.x
// index within it; the guard drops indices past len.
__global__ void vxy_kernel_large(const float* x, float* y, float* result, unsigned int len, unsigned int rowsz) {
    const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x + rowsz * blockIdx.y;
    if (i >= len)
        return;
    result[i] = x[i] * y[i];
} |
70 | #include <stdio.h>
#include <string.h>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
#define THREADS_PER_BLOCK 20
#de... |
71 | #include <stdlib.h>
#include <stdio.h>
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line)
{
if (cudaS... |
72 | #include <iostream>
#include <cmath>
#include <algorithm>
#include <iomanip>
typedef double Real;
__global__
void add(int n, Real* x, Real* y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i=index; i<n; i+=stride){
y[i] = x[i] + y[i];
}
}
int main(){
std:... |
73 | #include <stdlib.h>
#include <cuda.h>
#include <stdio.h>
#include <malloc.h>
__host__
void fill_vector(float *V, int len){
float aux = 5.0;
for (int i = 0; i < len; i++) {
V[i] = ((float)rand() / (float)(RAND_MAX)) * aux ;
}
}
__host__
void print(float *V, int len){
for (int i = 0; i < len; i++) {
p... |
74 | #include <iostream>
using namespace std;
__global__ void fnSearch(char *str, char *key, int *res)
{
*res = -1;
if(str[threadIdx.x] == *key)
*res = threadIdx.x;
}
int main(int argc, char *argv[])
{
if (argc != 3)
{
cout << "Usage: charSearch.out STRING KEY" << endl;
exit(1);
}
char *dStr, *dKey;
int *dR... |
75 | float h_A[]= {
0.646300533086186, 0.6891034119322159, 0.5468255896007155, 0.6042228186164886, 0.8659380581803113, 0.6300291449865434, 0.6636944471272259, 0.9882951548595007, 0.6352107108241554, 0.5790636985735749, 0.8804145795069749, 0.9456035439132031, 0.6321246094793169, 0.5520083637849034, 0.8193643662644936, 0.9486... |
76 | #include <stdio.h>
#define ARRAY_SIZE 10000
#define TPB 256
__device__ float saxpy(float x, float y, float a)
{
return a*x+y;
}
__global__ void saxpyKernel(float* x, float* y, float a)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
y[i] = saxpy(x[i], y[i], a);
}
__host__ void saxpyCPU(float* x, float* y... |
77 | #include<iostream>
#include<string>
#include<malloc.h>
#include<fstream>
#include<sstream>
#include<vector>
#include<cmath>
#include<cstdio>
#include<stdlib.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include <map>
#include <iomanip>
#include <sys/time.h>
#include<assert.h>
#define THREADSPERBLOCK 256
#define EPS 0.0... |
78 | #include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include <stdio.h>
#define CUDA_CHECK_RETURN(value) \
{ \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSucc... |
79 | #include <stdio.h>
#define N (2048 * 2048)
#define THREADS_PER_BLOCK 512
#define RADIUS 3
__global__ void add(int *in,int *out,int size) {
__shared__ int temp[THREADS_PER_BLOCK + (2*RADIUS)];
int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
int localIdx = threadIdx.x + RADIUS;
int localSum = 0 ;
tem... |
80 | #include "utils.cuh"
namespace Utils {
////////////////////////////////////////////////////////////////////////////
inline uint iDivUp(uint a, uint b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
////////////////////////////////////////////////////////////////////////////
void compu... |
81 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N = 8
/* Nome: Nathana Facion RA:191079 */
/* Exercicio 7 - Matriz Add */
/* Data: 20/04/2017 */
__global__ void addMatriz(int *A,int *B, int *C, int linhas, int colunas ){
int i = threadIdx.x + blockDim.x*blockIdx.x; // linha
int j = threadIdx.... |
82 | #include "includes.h"
extern "C" {
}
__global__ void reduce_sum_partial(const float* input, float* output, unsigned int len) {
// from http://www.techdarting.com/2014/06/parallel-reduction-in-cuda.html
// Load a segment of the input vector into shared memory
__shared__ float partialSum[2*256];
int globalThreadId = bloc... |
83 | #include "includes.h"
__global__ void adagrad_update_1D_1D(float* x, float* d, float* m, float clip, float lr, int size) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < size; tid += stride) {
if (d[tid] > clip) d[tid] = clip;
if (d[tid] < -clip) d[tid] = -clip;
m[tid... |
84 | // This program computer the sum of two N-element vectors using unified memory
// By: Nick from CoffeeBeforeArch
#include <stdio.h>
#include <cassert>
#include <iostream>
using std::cout;
// CUDA kernel for vector addition
// No change when using CUDA unified memory
__global__ void vectorAdd(int *a, int *b, int *c, ... |
85 | #include<stdio.h>
#include<stdlib.h>
#include <sys/time.h>
#define imin(a,b) (a<b?a:b)
const int N = 16777216;
const int TH_B = 512;
const int blocksPerGrid = imin( 32, (N+TH_B-1) / TH_B );
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
long lon... |
86 | #include<iostream>
const int SHARED_MEM_SIZE = 128*sizeof(int);
__global__ void ReverseFunc(int *a, int *r, int N){
__shared__ int sh[SHARED_MEM_SIZE];
int id = threadIdx.x + blockDim.x*blockIdx.x;
sh[threadIdx.x] = a[id];
__syncthreads();
r[id] = sh[blockDim.x-threadIdx.x-1];
}
int main(){
int *a, *r;
int *... |
87 | /*
* purpose: just a demo to show how vector addition can be done on
* the GPU with just a single thread block
* compilation: nvcc ./single_thread_block_vector_addition.cu
* usage: ./a.out
*/
#include <stdio.h>
#define N 100
/*
* GPU kernel
*/
__global__ void VecAdd(float *A,... |
88 | // moveArrays.cu
//
// demonstrates CUDA interface to data allocation on device (GPU)
// and data movement between host (CPU) and device.
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
int main(void)
{
float *a_h, *b_h; // pointers to host memory
float *a_d, *b_d; // pointers to device memory
... |
89 | #include "includes.h"
__global__ void convn_valid_kernel(float *output, float *data, float *kernel, const int H, const int W, const int kH, const int kW) {
// Matrix index
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// vH, vW stands for valid H and valid W
const int vH = ... |
90 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <iostream>
#define ARRAY_SIZE 1024
#define BLOCK_DIM 1024
using namespace std;
__global__ void fill_histrogram(int *dev_out, int *dev_in)
{
int i = blockIdx.x * bloc... |
91 | /*
============================================================================
Name : lab_1.cu
Author : Boyarskikh_Nikita
Version :
Copyright :
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include... |
92 | #include "includes.h"
// Element-wise division: x36[i] = x34[i] / x35[i] for i < x37, using a
// grid-stride loop. (Machine-generated names kept in the signature for
// caller compatibility; locals renamed for readability.)
__global__ void x33(float* x34, float* x35, float* x36, int x37) {
    const int step = gridDim.x * blockDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < x37; i += step) {
        x36[i] = x34[i] / x35[i];
    }
} |
93 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
using namespace std;
float reduce_cpu(float* data, int* pat){
float sum = 0;
int m = pat[0];
int numElement = pat[1];
for(int i = 0; i < numElement; i++)
{
float prod = 1;
for(int j = 2; j < m+2; j++)
prod *= data[pat[j]*nu... |
94 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float* var... |
95 | #include<stdio.h>
#include<stdlib.h>
#include<unistd.h>
#include<stdbool.h>
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" void allocateMemory(int **arr, int arraySize)
{
cudaMallocManaged(arr, ( (arraySize* sizeof(int))));
}
extern "C" void callCudaFree(int* local)
{
cudaFree(local);
}
//extern void ... |
96 | #include<stdio.h>
#include<stdlib.h>
__global__ void print_gpu(void) {
printf("Houston, we have a problem in section [%d,%d] \
From Apollo 13\n", threadIdx.x,blockIdx.x);
}
int main(void) {
printf("This is Houston. Say again, please. \
From Base\n");
print_gpu<<<2,2>>>();
cudaD... |
97 | #include <curand_kernel.h>
namespace curfil {
namespace gpu {
__global__
void setup_kernel(int seed, curandState *state) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
/* Each thread gets same seed, a different sequence number, no offset */
curand_init(seed, id, 0, &state[id]);
}
__global__
void gener... |
98 |
#include <math.h>
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
//Se definen los valores fijos a utilizar en el programa
#define H 288 //Cada bloque manejara 100 datos correspondientes a 5 minutos de mediciones en intervalos de 3 segu... |
99 | #include "includes.h"
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////... |
100 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
/*
To compile:
nvcc -o NishantLinear NishantLinear.cu
./NishantLinear
*/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
po... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.