学习OpenCV过程中,使用IplImage结构,调用数据时需区别width和widthStep: 1.width表示图像每行的像素个数,也就是图像的水平长度 2.widthStep是存储一行像素实际占用的字节数(含对齐填充)。
//Karl_bmp.h //1.IplImage 2 CBitmap CBitmap * IplImage2CBitmap(const IplImage *pImage) { if( pImage...IplImage *CBitmap2IplImage(const CBitmap *pBitmap) { DIBSECTION ds; pBitmap->GetObject(sizeof...IplImage* hBitmap2Ipl(HBITMAP hBmp) { BITMAP bmp; ::GetObject(hBmp,sizeof(BITMAP),&bmp);//hBmp-...2 HBITMAP HBITMAP IplImage2hBitmap(IplImage* pImg) { BYTE tmp[sizeof(BITMAPINFO)+1024]; BITMAPINFO...,可以先将CBitmap转为BITMAP,再由BITMAP转为IplImage; Cbitmap 转为 bitmap代码 ?
IplImage有两个属性容易导致错误:width和widthStep 前者表示图像的每行像素数,后者表示存储一行像素需要的字节数。...* cvCreateImage(CvSize cvSize(int width, int height), int depth, int channels): IplImage *image_33 =...cvCreateImage(cvSize(3, 3), 8, 3); IplImage *image_31 = cvCreateImage(cvSize(3, 3), 8, 1); IplImage...*image_53 = cvCreateImage(cvSize(5, 3), 8, 3); IplImage *image_51= cvCreateImage(cvSize(5, 3), 8,...1); IplImage *image_73 = cvCreateImage(cvSize(7, 3), 8, 3); IplImage *image_71 = cvCreateImage(cvSize
100,没问题,又正确了,再变回动态大小区域,问题又来了…按理说这个大小对我的代码应该没影响…不经意的看了一下每次切的大小,发现切偶数大小rect时是正确的,奇数大小的rect则显示错误,忽然想到貌似IplImage...分别取宽度为奇偶的图片,读这个widthStep参数,果然,偶数的话跟上面计算一样,奇数就会多出一些,那就不难理解为什么会产生错位了.一般对于奇数的width会填充一个RGB,也就是3bytes.那么现在要对IplImage...图像数据进行操作,就要按行取(IplImage的imageData是按照BGRBGRBGR按行存储的),然后每一行顺加一个widthStep了,不能傻傻的按照width x height的二维数组来计算了
IplImage转Mat (1)直接使用Mat的构造函数,原型如下: Mat(const IplImage* img, bool copyData=false); 第一个参数当然是要转换的IplImage...不再有关系,对mat的修改将不会再影响到IplImage。...结构体类型中的imageData成员与Mat类中的data数据进行转换,在IplImage结构体类型,imageData被定义为: char *imageData; /* Pointer...(1)利用IplImage结构体类型中的imageData成员与Mat类中的data数据进行转换,就像上面说的,Mat可以转IplImage,反过来也是一样的。...("iplImage2", &imgIpl2);
* src, IplImage* dst, int apertureSize=3); void cvSobel (IplImage* src, IplImage* dst, int dx, int dy..., int apertureSize=3); void cvPreCornerDetect (IplImage* img, IplImage* corners, Int apertureSize);...void cvCornerEigenValsAndVecs (IplImage* img, IplImage* eigenvv, int blockSize, int apertureSize=3);...cvGoodFeaturesToTrack (IplImage* image, IplImage* eigImage, IplImage* tempImage, CvPoint2D32f* corners...* src, IplImage* dst, IplFilter filter=IPL_GAUSSIAN_5x5); void cvPyrUp (IplImage* src, IplImage* dst
yc=M01/M00, 其中 Mx_order,y_order=SUMx,y(I(x,y)*x^x_order*y^y_order) */ static int aoiGravityCenter(IplImage...* binary_image(IplImage* src) { // cvThreshold( src, src, 100, 255, CV_THRESH_BINARY );//100 is...the thredhold IplImage* one_channel = cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,...* src) { int danwei = 255/max; int gray_pixel = weiyi*danwei; cout<<gray_pixel<<endl; IplImage...* src_left; IplImage* src_right; IplImage* draw = cvLoadImage(str_name_left.c_str(),1);//绘制重心的图像
* src =cvLoadImage(INPUT_IMAGE,CV_LOAD_IMAGE_GRAYSCALE); IplImage* dst =cvCreateImage(cvGetSize(src...), src->depth, src->nChannels);//获取原始图像大小 AXI_STREAM src_axi, dst_axi; IplImage2AXIvideo(src,...src_axi); AXIvideo2IplImage(src_axi, dst); cvSaveImage(OUTPUT_IMAGE, dst); cvShowImage(...实验原图1 //方法1cvLoadImage函数加载图片 IplImage* src = cvLoadImage(INPUT_IMAGE,CV_LOAD_IMAGE_GRAYSCALE); ?...实验结果1 //读取视频文件 IplImage *frame; CvCapture *capture = cvCaptureFromAVI("1.avi");//获取视频数据 cvNamedWindow
yc=M01/M00, 其中 Mx_order,y_order=SUMx,y(I(x,y)*x^x_order*y^y_order) */ static int aoiGravityCenter(IplImage...* binary_image(IplImage* src) { // cvThreshold( src, src, 100, 255, CV_THRESH_BINARY );//100 is...the thredhold IplImage* one_channel = cvCreateImage(cvSize(src->width,src->height),IPL_DEPTH_8U,...return one_channel; } int _tmain(int argc, _TCHAR* argv[]) { string str_name = "seg_right.bmp"; IplImage...* src; IplImage* draw = cvLoadImage(str_name.c_str(),1);//绘制重心的图像 if ((src = cvLoadImage(str_name.c_str
1 IplImage* EqualizeHistColorImage(IplImage *pImage) 2 { 3 IplImage *pEquaImage = cvCreateImage...depth, 3); 4 5 // 原图像分成各通道后再均衡化,最后合并即彩色图像的直方图均衡化 6 const int MAX_CHANNEL = 4; 7 IplImage...1 void ChangeTheColor(int pos,IplImage* g_pGrayImage,CvSeq* g_pcvSeq) 2 { 3 // 转为二值图,黑白图 4...IplImage *pBinaryImage = cvCreateImage(cvGetSize(g_pGrayImage), IPL_DEPTH_8U, 1); 5 cvThreshold(...1 void Get_The_Different_From_The_Cctv(IplImage* image1,IplImage* image2){ 2 IplImage* image_1=cvCreateImage
*src=NULL; IplImage *dst=NULL; //定义去雾函数如下 IplImage *quw(IplImage *src,int block,double w) {...//图像分别有三个颜色通道 IplImage *dst1=NULL; IplImage *dst2=NULL; IplImage *dst3=...NULL; IplImage *imgroi1; //dst1的ROI IplImage *imgroi2; //dst2...* doCalculateV(IplImage* w,IplImage* diff,IplImage* smooth) { IplImage* b = cvCreateImage(cvSize(...* doFinally(IplImage* in,IplImage* v,double A) { IplImage* b = cvCreateImage(cvSize(in->width,in->height
* A, IplImage* B) { long N = height * width; int h[256]; double p[256],u[256],w[256]; for(int...argv[1] : “lena.jpg”; IplImage* source = cvLoadImage( filename, 0 ); int Th; height = source->height...*roi = cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1); IplImage *bw = cvCreateImage(...* A, IplImage* B) { long N = height * width; int h[256]; double p[256],u[256],w[256]; for(int...*roi = cvCreateImage(cvSize(rect.width,rect.height),IPL_DEPTH_8U,1); IplImage *bw = cvCreateImage(
二值化结果如图1.1所示,可以看到图像并不标准,直线粗细也不一,我们尝试用霍夫变换找一下直线,代码如下 void findLines(IplImage* raw, IplImage* dst) { IplImage...* src = cvCloneImage(raw); IplImage* canny = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1); cvCanny...接下来步骤就是在二值化图(图1.1)中去掉这条线,代码如下: void eraseLine(IplImage* src, IplImage* flag) {// flag为图1.2所示的图片,src为图...* raw, IplImage* dst) { IplImage* src = cvCloneImage(raw); // clone the input image IplImage* canny =..._8U, 1); IplImage* dst = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1); IplImage* binary = cvCreateImage
IplImage *pSrcImage = cvLoadImage("pout.jpg", CV_LOAD_IMAGE_UNCHANGED); IplImage *pGrayImage_8U = cvCreateImage...(cvGetSize(pSrcImage), IPL_DEPTH_8U, 1); IplImage *pGrayImage_8U_2 = cvCreateImage(cvGetSize(pSrcImage...), IPL_DEPTH_8U, 1); IplImage *pGrayImage_64F=cvCreateImage(cvGetSize(pSrcImage), IPL_DEPTH_64F, 1)...cvConvertScale(pGrayImage_64F, pGrayImage_8U_2) //64F转8U 补充知识:OpenCV中利用cvConvertScale()对图像数据作线性变换~ 在OpenCV的IplImage...*pSrcImage = cvLoadImage("coins.png", CV_LOAD_IMAGE_UNCHANGED); //创建输出的图像 IplImage *pOutImage = cvCreateImage
大家好,又见面了,我是全栈君 Intel Image Processing Library (IPL) 1 typedef struct _IplImage 2 { 3 int nSize; /*...IplImage大小 */ 4 int ID; /* 版本 (=0)*/ 5 int nChannels; /* 大多数OPENCV函数支持1,2,3 或 4 个通道 */ 6 int alphaChannel...cvLoadImage(cFileName, 0); // 强制转化读取图像为灰度图 cvLoadImage(cFileName, 1); // 读取彩色图 cvCloneImage IplImage...* cvCloneImage(const IplImage *p) 在使用函数之前,不用特地开辟内存,即该函数会自己开一段内存,然后复制好image里面的数据,然后把这段内存中的数据返回....cvCreateImage IplImage* cvCreateImage(CvSize size, int depth, int channels) 作用申请一个图片结构大小的内存,比如创建灰色图像
(image,dst,Size(3,3),0); cv::namedWindow("高斯模糊图", WINDOW_AUTOSIZE); // 创建一个窗 qImg = IplImage...IplImage qImg = IplImage(dst); // cv::Mat -> IplImage cvSaveImage("C://Users//junyi.pc//Desktop//temp.jpg...(dst); // cv::Mat -> IplImage cvSaveImage("C://Users//junyi.pc//Desktop//temp.jpg", &qImg)...(dst); // cv::Mat -> IplImage cvSaveImage("C://Users//junyi.pc//Desktop//temp.jpg", &qImg)...(image); // cv::Mat -> IplImage cvSaveImage("C://Users//junyi.pc//Desktop//temp.jpg", &qImg
一直以为IplImage结构体中的widthStep元素大小等于width*nChannels,大错特错!...*image_33 = cvCreateImage(cvSize(3, 3), 8, 3); IplImage *image_31 = cvCreateImage(cvSize(3, 3), 8, 1...); IplImage *image_53 = cvCreateImage(cvSize(5, 3), 8, 3); IplImage *image_51= cvCreateImage(cvSize(5..., 3), 8, 1); IplImage *image_73 = cvCreateImage(cvSize(7, 3), 8, 3); IplImage *image_71 = cvCreateImage...widthStep大小对IplImage极为重要,在cxarray.cpp中,我们可以找到如下代码行: image->imageSize = image->widthStep * image->height
void cvSetImageROI( IplImage* p_w_picpath, CvRect rect ); void cvResetImageROI( IplImage* p_w_picpath...3-12:用p_w_picpathROI来增加某范围的像素 // roi_add #include #include intmain(intargc,char** argv) { IplImage...例3-13:利用其他widthStep方法把interest_img的所有像素值增加1 // Assuming IplImage *interest_img; and // CvRect interest_rect...; // Use widthStep to get a region of interest // // (Alternate method) // IplImage *sub_img = cvCreateImageHeader
static CvHidHaarClassifierCascade* hid_cascade = 0; #define WINNAME "Result" void detect_and_draw( IplImage...argv[1][0] - '0' : 0 ); else if( argc == 2 ) capture = cvCaptureFromAVI( argv[1] ); if( capture ) { IplImage...argv[1] : (char*)"lena.jpg"; IplImage* image = cvLoadImage( filename, 1 ); IplImage* temp = cvCreateImage...cvReleaseImage( &temp ); } cvDestroyWindow(WINNAME); return 0; } return 0; } void detect_and_draw( IplImage...* img, IplImage* temp ) { int scale = 2; CvPoint pt1, pt2; int i; cvPyrDown( img, temp, CV_GAUSSIAN_5x5
* img,IplImage* hist_img,const char* pstrWndName) { CvHistogram* hist = NULL; int bin_count = 256...* b = cvCreateImage(img_size,8,1); IplImage* g = cvCreateImage(img_size,8,1); IplImage* r = cvCreateImage...* b_hist_img = cvCreateImage(size,8,1); IplImage* g_hist_img = cvCreateImage(size,8,1); IplImage* r_hist_img...* src= cvLoadImage("F:\\test3.jpg"); IplImage* hsv = cvCreateImage( cvGetSize(src), 8, 3 ); IplImage..., 8, 1 ); IplImage* v_plane = cvCreateImage( cvGetSize(src), 8, 1 ); IplImage* planes[] = { h_plane
领取专属 10元无门槛券
手把手带您无忧上云