The Byte Order of Multi-Byte Data in Memory (or GPU Memory) and in Registers

Please credit the original post when reposting: http://blog.csdn.net/redline2005/article/details/23339443

Multi-byte data in host memory, in GPU memory (NVIDIA cards), and in registers (NVIDIA cards) is stored with the least significant byte at the lowest address; walking from the highest address down to the lowest reads the value from bit 31 down to bit 0. This layout is called little-endian.
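A quick host-side way to confirm the byte order of the machine you are on is to look at the first byte of a known 32-bit value. This is a minimal sketch (not part of the original post); it should print "little-endian" on x86 and on typical CUDA hosts:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    uint32_t v = 0x11223344;
    unsigned char bytes[4];
    memcpy(bytes, &v, sizeof v);   /* copy the value exactly as it sits in memory */
    /* On a little-endian machine the lowest address holds the lowest byte, 0x44. */
    printf(bytes[0] == 0x44 ? "little-endian\n" : "big-endian\n");
    return 0;
}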

For example, a char4 value (4 bytes) accessed through its component names:

char4 dog;
dog.x    // 1st byte (lowest address)
dog.y    // 2nd byte
dog.z    // 3rd byte
dog.w    // 4th byte (highest address)

Read back as a single 32-bit value, the bytes land in the register as w/z/y/x from bit 31 down to bit 0. That is little-endian, and NVIDIA cards store char4 this way.
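A small device-side sketch of the same point (the kernel name and values are illustrative, not from the original post; compile with nvcc):

__global__ void char4AsInt(int *out)
{
    char4 dog = make_char4(0x11, 0x22, 0x33, 0x44);   // x=0x11, y=0x22, z=0x33, w=0x44
    // char4 is 4-byte aligned, so it can be viewed as one int; on a
    // little-endian GPU .x becomes the low byte and .w the high byte.
    out[0] = *reinterpret_cast<int *>(&dog);          // 0x44332211
}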

Another example:

char dog[4];
dog[0] = 0x11;
dog[1] = 0x22;
dog[2] = 0x33;
dog[3] = 0x44;

int cat = *(int *)dog;

The value of cat is 0x44332211, not 0x11223344. As long as you access the bytes of a char4 through its component names, you never have to think about the storage order.
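Note that casting a char array to int * is, strictly speaking, undefined behavior in C (alignment and strict aliasing). A portable way to get the same result is memcpy; a minimal sketch (not part of the original post):

#include <stdint.h>
#include <string.h>

/* Reassemble four bytes into a 32-bit value in the machine's native order. */
static uint32_t load_native(const char *p)
{
    uint32_t v;
    memcpy(&v, p, sizeof v);   /* same bytes, no alignment or aliasing pitfalls */
    return v;                  /* 0x44332211 here on a little-endian machine */
}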

If you need a byte at an absolute position (for example via shifting), note that the shift count selects bytes by significance, which on a little-endian machine is also the order of increasing addresses:

uint32_t a = *p;   /* p points to a uint32_t */

 a        & 0xff   // 1st byte (lowest address)
(a >> 8)  & 0xff   // 2nd byte
(a >> 16) & 0xff   // 3rd byte
(a >> 24) & 0xff   // 4th byte
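Wrapped up as a helper under the little-endian assumption (illustrative, not from the original post):

#include <stdint.h>

/* Byte i (0 = least significant) of a 32-bit value. On a little-endian
   machine this equals the byte stored at (const char *)&a + i. */
static uint8_t byte_at(uint32_t a, int i)
{
    return (uint8_t)((a >> (8 * i)) & 0xff);
}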

The test code follows:


#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>

cudaError_t addWithCuda(int *c, char *b, unsigned int size);

__global__ void addKernel(int *c, char *b)
{
    // Pack four bytes, lowest address first, and read them back as one int.
    char dog[4];
    dog[0] = 0x12;
    dog[1] = 0x34;
    dog[2] = 0x56;
    dog[3] = 0x78;
    int *temp = (int *)dog;
    c[0] = *temp;    // 0x78563412 on a little-endian GPU

    // Split an int into its four bytes, lowest address first.
    int dog2 = 0x12345678;
    char *q = (char *)&dog2;
    b[0] = q[0];     // 0x78
    b[1] = q[1];     // 0x56
    b[2] = q[2];     // 0x34
    b[3] = q[3];     // 0x12
}

int main()
{
    const int arraySize = 5;
    char b[arraySize] = { 0 };
    int c[arraySize] = { 0 };

 printf("cpu : \n");
 char dog[4];
 dog[0]=0x12;dog[1]=0x34;dog[2]=0x56;dog[3]=0x78;
 c[0] = *(int *)dog;
 
 printf("a array dog of char type    : {%x,%x,%x,%x} \n",dog[0],dog[1],dog[2],dog[3]);
 printf("the address of it's element : {%x,%x,%x,%x} \n",&dog[0],&dog[1],&dog[2],&dog[3]);
 printf("we convert the array dog to a int data \n");
    printf("c : %x , address: %x \n",c[0],&c[0]);
 printf("\n");

 int p=0x12345678;//在内存中对应&p对应的地址比如0x04000000开始的12 34 56 78四个字节
    char *q;
    q=(char *)&p;//&p本来是int *类型,强制转换为类型char *。此时q的值为0x04000000
    //此时q[0]==0x12,q[1]==0x34,q[2]==0x56,q[0]==0x78
 printf("a data of type int p = %x\n",p);
 printf("we convert the int data to a array of char type \n");
 printf("char p = {%x,%x,%x,%x} \n",q[0], q[1], q[2], q[3]);
 printf("address of p : %x,%x,%x,%x \n",&q[0], &q[1], &q[2], &q[3]);
 printf("\n");

 printf("gpu : \n");
 c[0] = 0;
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
 printf("array dog : {%x,%x,%x,%x} \n",dog[0],dog[1],dog[2],dog[3]);
    printf("c : %x , address: %x \n",c[0],&c[0]);
 printf("\n");

 printf(" int p : %x \n",0x12345678);
 printf("char p : {%x,%x,%x,%x}",b[0],b[1],b[2],b[3]);


    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    getchar();
    return 0;
}

// Helper function that launches the kernel and copies its results back to the host.
cudaError_t addWithCuda(int *c, char *b, unsigned int size)
{
    char *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for the two output arrays.
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Launch the kernel on the GPU with a single thread.
    addKernel<<<1, 1>>>(dev_c, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
   
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy the outputs from the GPU buffers back to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(b, dev_b, size * sizeof(char), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_b);
 
    return cudaStatus;
}
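Built with nvcc and run on a little-endian x86 host with an NVIDIA GPU, the two halves should agree: c prints as 0x78563412, and the bytes of 0x12345678 print as 78, 56, 34, 12 on the CPU and on the GPU alike. (This expectation follows directly from the code above; the printed addresses will of course vary from run to run.)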

 
