固定/自定义图片 取色
程序员文章站
2022-07-13 14:28:35
...
固定图片取色
// Tracks the finger over the fixed image: moves the marker dot to the touch
// point and previews the sampled pixel color in colorView.
-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch *anyTouch = touches.anyObject;
    // Touch location expressed in the image view's own coordinate space.
    CGPoint location = [anyTouch locationInView:self.colorImage];
    NSLog(@"%f %f",location.x,location.y);
    // Recenter the marker in the superview's coordinates.
    CGFloat markerX = self.colorImage.frame.origin.x + location.x;
    CGFloat markerY = self.colorImage.frame.origin.y + location.y;
    self.pointImg.center = CGPointMake(markerX, markerY);
    // Sample the pixel under the finger and show it in the swatch view.
    UIColor *sampledColor = [self getPixelColorAtLocation:location];
    NSLog(@"====%@",sampledColor);
    self.colorView.backgroundColor = sampledColor;
}
// Samples the pixel color under `point` (in self.colorImage's coordinate
// space). Returns white when the point lies outside the image view, or nil
// when the offscreen bitmap context could not be created.
- (UIColor*) getPixelColorAtLocation:(CGPoint)point
{
    UIColor* color = [UIColor whiteColor];
    CGFloat viewWidth = self.colorImage.frame.size.width;
    CGFloat viewHeight = self.colorImage.frame.size.height;
    // >= 0 (not > 0) so the very first row/column of pixels is selectable.
    if (point.x >= 0 && point.x < viewWidth && point.y >= 0 && point.y < viewHeight) {
        CGImageRef inImage = self.colorImage.image.CGImage;
        // Offscreen ARGB context: after drawing, its backing buffer holds the
        // raw pixel bytes (4 bytes per pixel: alpha, red, green, blue).
        CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
        if (cgctx == NULL)
        {
            return nil;
        }
        size_t w = CGImageGetWidth(inImage);
        size_t h = CGImageGetHeight(inImage);
        CGRect rect = {{0,0},{w,h}};
        // Drawing renders the image into the context's buffer in the declared
        // ARGB layout, whatever the source format was.
        CGContextDrawImage(cgctx, rect, inImage);
        unsigned char* data = CGBitmapContextGetData(cgctx);
        if (data != NULL)
        {
            // Map the touch from view points to image pixels. The backing
            // CGImage is usually larger than the view (e.g. @2x/@3x assets),
            // so indexing with raw view coordinates would sample the wrong
            // pixel — or read past the end of the buffer.
            size_t px = (size_t)llround(point.x * (w / viewWidth));
            size_t py = (size_t)llround(point.y * (h / viewHeight));
            // Explicit bounds check: an out-of-range C array read is NOT an
            // Objective-C exception, so the former @try/@catch could never
            // have caught it.
            if (px < w && py < h)
            {
                size_t offset = 4 * (w * py + px);
                int alpha = data[offset];
                int red   = data[offset + 1];
                int green = data[offset + 2];
                int blue  = data[offset + 3];
                NSLog(@"offset: %i colors: RGB A %i %i %i %i",(int)offset,red,green,blue,alpha);
                color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
            }
        }
        // Release the context first; the pixel buffer was malloc'd by
        // createARGBBitmapContextFromImage: and must be freed by this caller.
        CGContextRelease(cgctx);
        if (data)
        {
            free(data);
        }
    }
    return color;
}
// Builds an offscreen bitmap context whose malloc'd buffer receives drawn
// content as premultiplied ARGB, 8 bits per component (4 bytes per pixel).
// The caller owns both the returned context (CGContextRelease) and its
// backing buffer (via CGBitmapContextGetData + free). Returns NULL on failure.
- (CGContextRef) createARGBBitmapContextFromImage:(CGImageRef) inImage
{
    // Guard: a nil UIImage.CGImage would otherwise produce a useless 0x0 context.
    if (inImage == NULL)
    {
        return NULL;
    }
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    // size_t (not int) so large images cannot overflow the byte counts.
    size_t bitmapBytesPerRow = pixelsWide * 4;
    size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }
    // Destination memory for any drawing into the context.
    void *bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL)
    {
        fprintf (stderr, "Memory not allocated!");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }
    // Whatever the source format (CMYK, grayscale, ...), drawing into this
    // context converts pixels to the ARGB layout declared here. The explicit
    // CGBitmapInfo cast silences the enum-type warning on modern SDKs.
    CGContextRef context = CGBitmapContextCreate(bitmapData,
                                                 pixelsWide,
                                                 pixelsHigh,
                                                 8, // bits per component
                                                 bitmapBytesPerRow,
                                                 colorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        // Creation failed: nothing will ever hand the buffer back, free it now.
        free(bitmapData);
        fprintf (stderr, "Context not created!");
    }
    // The context retains what it needs; release our color space reference.
    CGColorSpaceRelease(colorSpace);
    return context;
}
自定义图片取色
// Private class extension: the controller serves as the image picker's
// delegate (UIImagePickerController requires the navigation delegate too).
@interface ViewController ()<UIImagePickerControllerDelegate,UINavigationControllerDelegate>
// Scaled preview of the picked photo; touch events are interpreted in its space.
@property(nonatomic,strong)UIImageView *imageView;
// Full-size copy of the picked photo; its CGImage is what gets pixel-sampled.
@property(nonatomic,strong)UIImageView *backgroundImageV;
// Swatch view that previews the sampled color.
@property (weak, nonatomic) IBOutlet UIView *colorView;
// Shrink factor fitting the picked image to the screen; set in
// getImageByScaleFromImage: and reused to map touch points to image pixels.
@property(nonatomic,assign)CGFloat scale;
// Circular outline marker shown over the image.
@property(nonatomic,strong)UIView *viewCircle;
// Picker source; compared against UIImagePickerControllerSourceType values
// in the didFinishPicking callback.
@property (nonatomic,assign)NSInteger openType;
// NOTE(review): UIPopoverController is deprecated since iOS 9 — consider
// migrating to UIPopoverPresentationController.
@property (nonatomic,strong)UIPopoverController *imagePickerPopover;
@end
@implementation ViewController
// Lifecycle: no additional setup required after the view loads.
- (void)viewDidLoad {
    [super viewDidLoad];
}
// Presents the system image picker so the user can choose a photo to sample.
- (IBAction)getImageAction:(id)sender {
    UIImagePickerController *picker = [[UIImagePickerController alloc] init];
    // Wire up the delegate BEFORE presenting — assigning it after the
    // presentation risks missing callbacks delivered during presentation.
    picker.delegate = self;
    [self.navigationController presentViewController:picker animated:YES completion:nil];
}
//计算imageView的frame
// Computes the on-screen frame for the picked image, shrunk so the whole
// image fits the screen. Side effect: stores the larger of the width/height
// ratios in self.scale, which is later reused to map touch points back to
// image pixel coordinates.
-(CGRect)getImageByScaleFromImage:(UIImage *)image
{
CGFloat widthScale = image.size.width / SCREEN_WIDTH;
CGFloat heightScale = image.size.height / SCREEN_HEIGHT;
self.scale = MAX(widthScale, heightScale);
// NOTE(review): `(image.size.height - 64)` subtracts the 64pt bar height in
// *image-pixel* units before dividing by scale; subtracting 64 after the
// division looks more plausible for vertical centering — confirm intent.
return CGRectMake(0, (SCREEN_HEIGHT - (image.size.height - 64) / self.scale) / 2.0, image.size.width / self.scale, image.size.height / self.scale);
}
//修正图片的旋转方向
// Redraws `aImage` so its pixel data matches UIImageOrientationUp, removing
// the orientation flag. Camera photos often store rotated pixels plus an
// orientation hint; pixel-level sampling needs the physically upright bytes.
- (UIImage *)fixOrientation:(UIImage *)aImage {
// Already upright: nothing to do.
if (aImage.imageOrientation == UIImageOrientationUp)
return aImage;
// First pass: rotation/translation that undoes the stored orientation.
CGAffineTransform transform = CGAffineTransformIdentity;
switch (aImage.imageOrientation) {
case UIImageOrientationDown:
case UIImageOrientationDownMirrored:
// 180° rotation about the image center.
transform = CGAffineTransformTranslate(transform, aImage.size.width, aImage.size.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
// 90° counter-clockwise.
transform = CGAffineTransformTranslate(transform, aImage.size.width, 0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// 90° clockwise.
transform = CGAffineTransformTranslate(transform, 0, aImage.size.height);
transform = CGAffineTransformRotate(transform, -M_PI_2);
break;
default:
break;
}
// Second pass: horizontal flip for the mirrored variants.
switch (aImage.imageOrientation) {
case UIImageOrientationUpMirrored:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.width, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationLeftMirrored:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, aImage.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
default:
break;
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
CGContextRef ctx = CGBitmapContextCreate(NULL, aImage.size.width, aImage.size.height,
CGImageGetBitsPerComponent(aImage.CGImage), 0,
CGImageGetColorSpace(aImage.CGImage),
CGImageGetBitmapInfo(aImage.CGImage));
CGContextConcatCTM(ctx, transform);
switch (aImage.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// Grr...
// 90°-rotated sources: width and height swap in the draw rect.
CGContextDrawImage(ctx, CGRectMake(0,0,aImage.size.height,aImage.size.width), aImage.CGImage);
break;
default:
CGContextDrawImage(ctx, CGRectMake(0,0,aImage.size.width,aImage.size.height), aImage.CGImage);
break;
}
// And now we just create a new UIImage from the drawing context
CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
UIImage *img = [UIImage imageWithCGImage:cgimg];
// Balance the Create calls above.
CGContextRelease(ctx);
CGImageRelease(cgimg);
return img;
}
// UIImagePickerControllerDelegate callback: normalizes the picked photo's
// orientation, builds the sampling view hierarchy (full-size bitmap behind a
// white cover behind the scaled preview), adds the circular touch marker,
// saves camera shots to the photo library, then dismisses the picker.
-(void)imagePickerController:(UIImagePickerController *)picker didFinishPickingMediaWithInfo:(NSDictionary *)info
{
    // Plain dictionary lookup — subscripting, not KVC valueForKey:.
    UIImage *firstImage = info[UIImagePickerControllerOriginalImage];
    // Bake the orientation flag into the pixels so sampling reads correct bytes.
    UIImage *image = [self fixOrientation:firstImage];
    NSLog(@"123 %f %f",image.size.width,image.size.height);
    // Scaled preview the user actually touches.
    self.imageView = [[UIImageView alloc] init];
    self.imageView.frame = [self getImageByScaleFromImage:image];
    self.imageView.image = image;
    // Full-size copy whose CGImage is sampled for pixel colors.
    self.backgroundImageV = [[UIImageView alloc] initWithFrame:CGRectMake(0, self.imageView.frame.origin.y, image.size.width, image.size.height)];
    self.backgroundImageV.image = image;
    // White cover hides the oversized background image from the user.
    UIView *coverView = [[UIView alloc] initWithFrame:self.backgroundImageV.frame];
    coverView.backgroundColor = [UIColor whiteColor];
    [self.view addSubview:self.backgroundImageV];
    [self.view addSubview:coverView];
    [self.view addSubview:self.imageView];
    // Circular outline that will track the finger.
    UIView *viewCircle = [[UIView alloc] init];
    viewCircle.backgroundColor = [UIColor clearColor];
    viewCircle.layer.borderColor = [[UIColor blackColor] CGColor];
    viewCircle.layer.borderWidth = 1.5;
    viewCircle.frame = CGRectMake(0, 64, 25, 25);
    [viewCircle.layer setCornerRadius:CGRectGetHeight([viewCircle bounds]) / 2];
    viewCircle.layer.masksToBounds = YES;
    self.viewCircle = viewCircle;
    [self.imageView addSubview:self.viewCircle];
    if (self.openType == UIImagePickerControllerSourceTypeCamera) {
        // A camera shot is not in the library yet — save a copy.
        UIImageWriteToSavedPhotosAlbum(image, self, @selector(image:didFinishSavingWithError:contextInfo:), NULL);
    } else if (self.openType == UIImagePickerControllerSourceTypePhotoLibrary) {
        // Picked from the library — nothing to save.
    }
    // Dismiss whichever presentation style was used for the picker.
    if (self.imagePickerPopover) {
        [self.imagePickerPopover dismissPopoverAnimated:YES];
        self.imagePickerPopover = nil;
    } else {
        // Close the modally presented UIImagePickerController.
        [self dismissViewControllerAnimated:YES completion:nil];
    }
}
// Samples the color under the finger on the user-picked image and previews
// it in colorView.
-(void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch *anyTouch = touches.anyObject;
    // Touch location expressed in the preview image view's coordinate space.
    CGPoint location = [anyTouch locationInView:self.imageView];
    NSLog(@"%f %f",location.x,location.y);
    // Read the pixel under the finger and show it in the swatch view.
    UIColor *sampledColor = [self getPixelColorAtLocation:location];
    NSLog(@"====%@",sampledColor);
    self.colorView.backgroundColor = sampledColor;
}
//
// Samples the pixel color under `point` (in self.imageView's coordinate
// space) from the full-size backgroundImageV bitmap. Returns white when the
// point lies outside the preview, or nil when the offscreen bitmap context
// could not be created.
- (UIColor*) getPixelColorAtLocation:(CGPoint)point
{
    UIColor* color = [UIColor whiteColor];
    // >= 0 (not > 0) so the very first row/column of pixels is selectable.
    if (point.x >= 0 && point.x < self.imageView.frame.size.width &&
        point.y >= 0 && point.y < self.imageView.frame.size.height) {
        // Sample the full-size image, not the scaled preview.
        CGImageRef inImage = self.backgroundImageV.image.CGImage;
        // Offscreen ARGB context: after drawing, its backing buffer holds the
        // raw pixel bytes (4 bytes per pixel: alpha, red, green, blue).
        CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
        if (cgctx == NULL)
        {
            return nil;
        }
        size_t w = CGImageGetWidth(inImage);
        size_t h = CGImageGetHeight(inImage);
        CGRect rect = {{0,0},{w,h}};
        CGContextDrawImage(cgctx, rect, inImage);
        unsigned char* data = CGBitmapContextGetData(cgctx);
        if (data != NULL)
        {
            // self.scale (set in getImageByScaleFromImage:) maps preview
            // points back to full-size image coordinates.
            size_t px = (size_t)llround(point.x * self.scale);
            size_t py = (size_t)llround(point.y * self.scale);
            // Explicit bounds check against the actual buffer extent: an
            // out-of-range C array read is NOT an Objective-C exception, so
            // the former @try/@catch could never have caught it.
            if (px < w && py < h)
            {
                size_t offset = 4 * (w * py + px);
                int alpha = data[offset];
                int red   = data[offset + 1];
                int green = data[offset + 2];
                int blue  = data[offset + 3];
                color = [UIColor colorWithRed:(red/255.0f) green:(green/255.0f) blue:(blue/255.0f) alpha:(alpha/255.0f)];
            }
        }
        // Release the context first; the pixel buffer was malloc'd by
        // createARGBBitmapContextFromImage: and must be freed by this caller.
        CGContextRelease(cgctx);
        if (data)
        {
            free(data);
        }
    }
    return color;
}
// Builds an offscreen bitmap context whose malloc'd buffer receives drawn
// content as premultiplied ARGB, 8 bits per component (4 bytes per pixel).
// The caller owns both the returned context (CGContextRelease) and its
// backing buffer (via CGBitmapContextGetData + free). Returns NULL on failure.
- (CGContextRef) createARGBBitmapContextFromImage:(CGImageRef) inImage
{
    // Guard: a nil UIImage.CGImage would otherwise produce a useless 0x0 context.
    if (inImage == NULL)
    {
        return NULL;
    }
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);
    // size_t (not int) so large images cannot overflow the byte counts.
    size_t bitmapBytesPerRow = pixelsWide * 4;
    size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }
    // Destination memory for any drawing into the context.
    void *bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL)
    {
        fprintf (stderr, "Memory not allocated!");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }
    // Whatever the source format (CMYK, grayscale, ...), drawing into this
    // context converts pixels to the ARGB layout declared here. The explicit
    // CGBitmapInfo cast silences the enum-type warning on modern SDKs.
    CGContextRef context = CGBitmapContextCreate(bitmapData,
                                                 pixelsWide,
                                                 pixelsHigh,
                                                 8, // bits per component
                                                 bitmapBytesPerRow,
                                                 colorSpace,
                                                 (CGBitmapInfo)kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        // Creation failed: nothing will ever hand the buffer back, free it now.
        free(bitmapData);
        fprintf (stderr, "Context not created!");
    }
    // The context retains what it needs; release our color space reference.
    CGColorSpaceRelease(colorSpace);
    return context;
}
上一篇: Echarts 自定义颜色