第一,对图片的重绘
// Redraws `image` into a new bitmap of `reSize` points and returns it.
// Fix: use UIGraphicsBeginImageContextWithOptions with scale 0 so the output
// keeps the device screen scale — plain UIGraphicsBeginImageContext renders at
// scale 1.0 and produces blurry results on Retina displays.
- (UIImage *)reSizeImage:(UIImage *)image toSize:(CGSize)reSize {
    // opaque = NO preserves any alpha channel; scale 0.0 = current device scale.
    UIGraphicsBeginImageContextWithOptions(reSize, NO, 0.0);
    [image drawInRect:CGRectMake(0, 0, reSize.width, reSize.height)];
    UIImage *reSizeImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return reSizeImage;
}
此方法是对图片按照一定的比例进行重绘,无需先移除再重新添加另外一个大小的imageView
优点:重绘图片可以减小图片占用的内存,是对图片的处理中最常用的方法(注意:缩小重绘会丢弃像素信息,并不能真正提高清晰度;若使用 scale 为 1 的绘图上下文,在 Retina 屏幕上反而会降低清晰度)
第二,截取原图片固定比例的图片
A.第一种方法
// Crops a `size` region out of the center of `originalImage`, after first
// scaling the image so the crop covers as much of it as possible.
// Returns the cropped image.
- (UIImage *)getCutImageSize:(CGSize)size originalImage:(UIImage *)originalImage {
    // Scale the source so the requested crop fits it edge-to-edge.
    originalImage = [self equalScaleCompressImage:originalImage size:size];
    // Crop the centered rect. CGImageCreateWithImageInRect follows the
    // Create rule, so the returned CGImage must be released below.
    CGImageRef cutImageRef = CGImageCreateWithImageInRect(originalImage.CGImage,
                                                          [self getCutRectWithBigSize:originalImage.size cutRect:size]);
    UIImage *cutImage = [UIImage imageWithCGImage:cutImageRef];
    // Fix: the original leaked cutImageRef and also opened a bitmap context
    // whose drawing result was never used; both issues are removed here.
    CGImageRelease(cutImageRef);
    return cutImage;
}
// Returns the centered crop rect: a `cutSize` rectangle positioned so that its
// center coincides with the center of a `bigSize` canvas.
// Fix: the original body contained a bare (non-comment) Chinese prose line,
// which is a compile error; it is converted into the comment below.
- (CGRect)getCutRectWithBigSize:(CGSize)bigSize cutRect:(CGSize)cutSize {
    // First locate the center point, then derive the crop rect from it.
    CGPoint bigPoint = CGPointMake(bigSize.width / 2.0f, bigSize.height / 2.0f);
    CGRect rect = CGRectMake(bigPoint.x - cutSize.width / 2.0f,
                             bigPoint.y - cutSize.height / 2.0f,
                             cutSize.width,
                             cutSize.height);
    return rect;
}
// Proportionally scales `bigImage` so it just covers `size` (aspect-fill:
// the larger of the two required scale factors wins, see
// getCompressScaleWithBigSize:smallSize:).
- (UIImage *)equalScaleCompressImage:(UIImage *)bigImage size:(CGSize)size {
    CGFloat scale = [self getCompressScaleWithBigSize:bigImage.size smallSize:size];
    // Hoist the target size instead of recomputing it per call site.
    CGSize targetSize = CGSizeMake(bigImage.size.width * scale, bigImage.size.height * scale);
    // Fix: scale 0.0 keeps the device screen scale; plain
    // UIGraphicsBeginImageContext renders at 1.0 and degrades Retina quality.
    UIGraphicsBeginImageContextWithOptions(targetSize, NO, 0.0);
    [bigImage drawInRect:CGRectMake(0, 0, targetSize.width, targetSize.height)];
    UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return scaledImage;
}
// Returns the scale factor that shrinks `bigSize` so it exactly covers
// `smallSize`: whichever dimension needs less shrinking determines the scale.
- (CGFloat)getCompressScaleWithBigSize:(CGSize)bigSize smallSize:(CGSize)smallSize {
    // Compare aspect ratios (height/width) to decide which edge is limiting.
    BOOL bigIsRelativelyTaller =
        (bigSize.height / bigSize.width) >= (smallSize.height / smallSize.width);
    return bigIsRelativelyTaller ? (smallSize.width / bigSize.width)
                                 : (smallSize.height / bigSize.height);
}
B.第二种方法
// Demo: crop a fixed 100x100-pixel square from the top-left of an image.
// The source image.
UIImage *img = [UIImage imageNamed:@"ly3.jpg"];
// Fix: use the UIImage's point size for layout. The original used
// CGImageGetWidth/Height, which return PIXEL dimensions and make the image
// view twice (or three times) too large for @2x/@3x assets.
CGFloat imgWidth = img.size.width;
CGFloat imgHeight = img.size.height;
// Get the backing bitmap.
CGImageRef temImg = img.CGImage;
// Crop a 100x100 (pixel) region. CGImageCreateWithImageInRect follows the
// Create rule, so the result must be released once wrapped in a UIImage.
temImg = CGImageCreateWithImageInRect(temImg, CGRectMake(0, 0, 100, 100));
// Wrap in a UIImage. Fix: renamed the variable from `new`, which collides
// with Objective-C's +new factory-method convention.
UIImage *croppedImage = [UIImage imageWithCGImage:temImg];
// Release the cropped bitmap (Create rule).
CGImageRelease(temImg);
// Display the original image; the crop result remains in croppedImage.
UIImageView *oldImageView = [[UIImageView alloc] initWithFrame:CGRectMake(0, 20, imgWidth, imgHeight)];
oldImageView.contentMode = UIViewContentModeScaleAspectFill;
oldImageView.image = img;
[self.view addSubview:oldImageView];
第三,图片的模糊效果
// Blur approach 1: a translucent UIToolbar layered over the image view gives
// a cheap system blur without any image processing.
// NOTE(review): the x-origin of width/2 covers only the right half of
// bgImageView — confirm this offset is intentional.
UIToolbar *toolBar = [[UIToolbar alloc]initWithFrame:CGRectMake(width/2, 0, width, height)];
toolBar.barStyle = UIBarStyleDefault;
toolBar.translucent = YES;
[bgImageView addSubview:toolBar];
最常用
// Blur approach 2 (preferred, iOS 8+): UIVisualEffectView driven by a
// UIBlurEffect, here with the light style.
UIBlurEffect *effect = [UIBlurEffect effectWithStyle:UIBlurEffectStyleLight];
UIVisualEffectView *effectView = [[UIVisualEffectView alloc]initWithEffect:effect];
// NOTE(review): x-origin of width/2 covers only the right half of
// bgImageView — confirm this offset is intentional.
effectView.frame = CGRectMake(width/2, 0, width, height);
[bgImageView addSubview:effectView];
第四种,高斯模糊
//效果:第一种Core Image设置模糊之后会在周围产生白边,vImage使用不存在任何问题;
//性能:图像模糊处理属于复杂的计算,大部分图片模糊选择的是vImage,性能最佳
//blur变模糊
// Gaussian-blurs `image` with Core Image's CIGaussianBlur filter.
// `blur` is the blur radius (the filter's inputRadius).
// Note: CIGaussianBlur samples past the image edge, so the result can show a
// faded/white fringe around the border; the vImage variant does not.
+ (UIImage *)coreBlurImage:(UIImage *)image withBlurNumber:(CGFloat)blur {
    // A CIContext renders a CIFilter's output; it may be CPU- or GPU-backed.
    CIContext *context = [CIContext contextWithOptions:nil];
    // CIImage is an immutable description of an image, built here from the
    // UIImage's backing CGImage.
    CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
    // Configure the Gaussian blur filter.
    CIFilter *filter = [CIFilter filterWithName:@"CIGaussianBlur"];
    [filter setValue:inputImage forKey:kCIInputImageKey];
    // Use the framework constant rather than the raw @"inputRadius" string.
    [filter setValue:@(blur) forKey:kCIInputRadiusKey];
    CIImage *result = filter.outputImage;
    CGImageRef outImage = [context createCGImage:result fromRect:[result extent]];
    // Fix: preserve the source image's scale and orientation; plain
    // imageWithCGImage: resets both, mis-sizing @2x/@3x images and rotating
    // camera photos.
    UIImage *blurImage = [UIImage imageWithCGImage:outImage
                                             scale:image.scale
                                       orientation:image.imageOrientation];
    // createCGImage follows the Create rule — release the CGImage.
    CGImageRelease(outImage);
    return blurImage;
}
// vImage belongs to Accelerate.framework (#import <Accelerate/Accelerate.h>),
// Apple's vector/matrix/DSP library; since an image is just a matrix of pixel
// data, its vImageBoxConvolve_ARGB8888 box-convolution is typically the
// fastest blur option on iOS.
// Box-blurs `image`. `blur` outside [0, 1] falls back to 0.5 and is mapped to
// an odd convolution kernel size (vImage requires an odd box size).
+ (UIImage *)boxblurImage:(UIImage *)image withBlurNumber:(CGFloat)blur
{
    if (blur < 0 || blur > 1) {
        blur = 0.5;
    }
    // Kernel size must be a positive odd integer.
    int boxSize = (int)(blur * 40);
    boxSize = boxSize - (boxSize % 2) + 1;

    CGImageRef img = image.CGImage;
    vImage_Buffer inBuffer, outBuffer;
    vImage_Error error;
    void *pixelBuffer;

    // Copy the source pixels. CGDataProviderCopyData follows the Copy rule,
    // so inBitmapData must be CFRelease'd before returning.
    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);
    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);

    pixelBuffer = malloc(CGImageGetBytesPerRow(img) * CGImageGetHeight(img));
    if (pixelBuffer == NULL) {
        // Fix: the original only logged here and then crashed inside vImage;
        // clean up and return the unblurred image instead.
        NSLog(@"No pixelbuffer");
        CFRelease(inBitmapData);
        return image;
    }
    outBuffer.data = pixelBuffer;
    outBuffer.width = CGImageGetWidth(img);
    outBuffer.height = CGImageGetHeight(img);
    outBuffer.rowBytes = CGImageGetBytesPerRow(img);

    error = vImageBoxConvolve_ARGB8888(&inBuffer, &outBuffer, NULL, 0, 0, boxSize, boxSize, NULL, kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // Fix: reuse the source image's bitmap info so the alpha/byte-order layout
    // matches the pixels we copied; the hard-coded kCGImageAlphaNoneSkipLast
    // in the original could mismatch the actual pixel format.
    CGContextRef ctx = CGBitmapContextCreate(outBuffer.data,
                                             outBuffer.width,
                                             outBuffer.height,
                                             8,
                                             outBuffer.rowBytes,
                                             colorSpace,
                                             CGImageGetBitmapInfo(img));
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];

    // Release everything we created. Fix: the original released colorSpace
    // TWICE (an over-release of a CF object, which can crash); it is released
    // exactly once here.
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(pixelBuffer);
    CFRelease(inBitmapData);
    CGImageRelease(imageRef);
    return returnImage;
}