How can I get the RGB value of a particular pixel in a UIImage?
You can't access the raw data directly, but by getting the CGImage of this image you can access it. Here is a link to another question that answers your question and others you might have regarding detailed image manipulation: CGImage
Try this very simple code:
I used to detect a wall in my maze game (the only info that I need is the alpha channel, but I included the code to get the other colors for you):
// Returns YES when the pixel at (x, y) has a nonzero alpha component.
// Used for wall detection in the maze game: opaque pixels are walls.
// NOTE(review): assumes the backing CGImage stores 4 bytes per pixel with
// alpha as the 4th component (e.g. RGBA8888 PNG) — verify for other sources.
- (BOOL)isWallPixel:(UIImage *)image xCoordinate:(int)x yCoordinate:(int)y {
    CGImageRef cgImage = image.CGImage;
    CFDataRef pixelData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
    if (pixelData == NULL) {
        return NO; // could not read the raw bitmap data
    }
    const UInt8 *data = CFDataGetBytePtr(pixelData);
    // Index with the CGImage's row stride, not image.size.width:
    // image.size is in points and differs from pixels on @2x/@3x images,
    // and rows may be padded beyond width * 4 bytes.
    size_t pixelInfo = (size_t)y * CGImageGetBytesPerRow(cgImage) + (size_t)x * 4;
    //UInt8 red = data[pixelInfo]; // If you need this info, enable it
    //UInt8 green = data[(pixelInfo + 1)]; // If you need this info, enable it
    //UInt8 blue = data[pixelInfo + 2]; // If you need this info, enable it
    UInt8 alpha = data[pixelInfo + 3]; // only the alpha channel is needed here
    CFRelease(pixelData);
    return alpha != 0;
}
OnTouch
// Touch handler: when the touch lands on imgZoneWheel, sample the alpha of
// the touched pixel (getPixelColorAtLocation: stores it in the `alpha` ivar)
// and, if that pixel is fully opaque, forward the touch point and swap the
// wheel image.
- (void)touchesBegan:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch *firstTouch = [[touches allObjects] objectAtIndex:0];
    CGPoint viewPoint = [firstTouch locationInView:self.view];

    UITouch *anyTouch = [[event allTouches] anyObject];
    if ([anyTouch view] != imgZoneWheel) {
        return; // touch is outside the wheel image
    }

    CGPoint wheelPoint = [anyTouch locationInView:imgZoneWheel];
    [self getPixelColorAtLocation:wheelPoint]; // side effect: updates `alpha`

    if (alpha == 255) {
        NSLog(@"In Image Touch view alpha %d", alpha);
        [self translateCurrentTouchPoint:viewPoint.x :viewPoint.y];
        [imgZoneWheel setImage:[UIImage imageNamed:[NSString stringWithFormat:@"blue%d.png", GrndFild]]];
    }
}
// Samples the pixel at `point` from imgZoneWheel's image and returns it as a
// UIColor. Also stores the raw alpha byte in the `alpha` ivar (read by the
// touch handler). Returns nil when the bitmap context cannot be created.
- (UIColor *)getPixelColorAtLocation:(CGPoint)point
{
    UIColor *color = nil;
    CGImageRef inImage = imgZoneWheel.image.CGImage;

    // Off-screen ARGB bitmap context: 4 bytes per pixel (Alpha, Red, Green, Blue).
    CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
    if (cgctx == NULL) { return nil; /* error */ }

    size_t w = CGImageGetWidth(inImage);
    size_t h = CGImageGetHeight(inImage);
    CGRect rect = {{0, 0}, {w, h}};

    // Drawing the image fills the context's buffer with raw ARGB pixel data
    // in the color space the context was created with.
    CGContextDrawImage(cgctx, rect, inImage);

    unsigned char *data = CGBitmapContextGetData(cgctx);
    if (data != NULL) {
        // 4 bytes per pixel; the context was created with bytesPerRow == 4 * w,
        // so there is no row padding to account for.
        int offset = 4 * ((w * round(point.y)) + round(point.x));
        alpha = data[offset];          // A
        int red = data[offset + 1];    // R
        int green = data[offset + 2];  // G
        int blue = data[offset + 3];   // B
        color = [UIColor colorWithRed:(red / 255.0f)
                                green:(green / 255.0f)
                                 blue:(blue / 255.0f)
                                alpha:(alpha / 255.0f)];
    }

    // FIX: release the context — the original left this commented out and
    // leaked a CGContext on every call. Release before freeing its buffer.
    CGContextRelease(cgctx);
    if (data) { free(data); }

    return color;
}
// Creates an off-screen bitmap context the size of `inImage`, using
// premultiplied ARGB with 8 bits per component. Regardless of the source
// image's format (CMYK, grayscale, ...), drawing into this context yields
// ARGB data. The caller owns the returned context (CGContextRelease) and
// must free its backing buffer (CGBitmapContextGetData + free).
// Returns NULL on failure.
- (CGContextRef)createARGBBitmapContextFromImage:(CGImageRef)inImage
{
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    void *bitmapData;

    // Use the entire image. size_t arithmetic avoids the int overflow the
    // original risked (width * 4 * height can exceed INT_MAX for big images).
    size_t pixelsWide = CGImageGetWidth(inImage);
    size_t pixelsHigh = CGImageGetHeight(inImage);

    // 4 bytes per pixel: 8 bits each of alpha, red, green, blue.
    size_t bitmapBytesPerRow = pixelsWide * 4;
    size_t bitmapByteCount = bitmapBytesPerRow * pixelsHigh;

    // Use the generic device RGB color space.
    colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        fprintf(stderr, "Error allocating color space\n");
        return NULL;
    }

    // Destination buffer that any drawing into the context renders into.
    bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL)
    {
        fprintf(stderr, "Memory not allocated!");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }

    context = CGBitmapContextCreate(bitmapData,
                                    pixelsWide,
                                    pixelsHigh,
                                    8, // bits per component
                                    bitmapBytesPerRow,
                                    colorSpace,
                                    kCGImageAlphaPremultipliedFirst);
    if (context == NULL)
    {
        free(bitmapData); // don't leak the buffer when creation fails
        fprintf(stderr, "Context not created!");
    }

    // The context retains what it needs; release the color space before returning.
    CGColorSpaceRelease(colorSpace);
    return context;
}
point = CGPointMake(point.x * image.scale, point.y * image.scale);
–
Tehee Some Swift code based on Minas' answer. Originally I had some code to figure out the pixel stride, but I've updated the answer to use the ComponentLayout from Desmond's answer. I've also moved the extension to CGImage.
Swift 5:
public extension UIImage {
    /// Returns the color of the pixel at `point`, or `.clear` when the
    /// image has no CGImage backing.
    func getPixelColor(_ point: CGPoint) -> UIColor {
        if let backing = self.cgImage {
            return backing.getPixelColor(point)
        }
        return UIColor.clear
    }
}
/// Describes the in-memory ordering of a pixel's color components.
/// NOTE(review): this extension is explicitly truncated in the excerpt
/// ("skipping the rest") — `componentLayout`, used by CGImage.getPixelColor,
/// is among the omitted members.
public extension CGBitmapInfo {
// See https://mcmap.net/q/126792/-how-to-get-pixel-data-from-a-uiimage-cocoa-touch-or-cgimage-core-graphics
// I've extended it to include .a
enum ComponentLayout {
case a
case bgra
case abgr
case argb
case rgba
case bgr
case rgb
// Number of bytes occupied by one pixel for this layout.
var count: Int {
switch self {
case .a: return 1
case .bgr, .rgb: return 3
default: return 4
}
}
}
// True when the color components are stored premultiplied by alpha
// (CGImageAlphaInfo .premultipliedFirst / .premultipliedLast).
var isAlphaPremultiplied: Bool {
let alphaInfo = CGImageAlphaInfo(rawValue: rawValue & Self.alphaInfoMask.rawValue)
return alphaInfo == .premultipliedFirst || alphaInfo == .premultipliedLast
}
// [...] skipping the rest
}
/// Reads the color of the pixel at `point` directly from the CGImage's raw
/// bytes, honoring the component layout reported by `bitmapInfo`.
/// NOTE(review): `point` must be in pixel coordinates; when called for a
/// UIImage, scale by `image.scale` first or @2x/@3x images sample wrong.
/// NOTE(review): the extension's closing brace is missing from this excerpt.
public extension CGImage {
func getPixelColor(_ point: CGPoint) -> UIColor {
// Bail out if the raw bytes or the component layout are unavailable.
guard let pixelData = self.dataProvider?.data, let layout = bitmapInfo.componentLayout, let data = CFDataGetBytePtr(pixelData) else {
return .clear
}
let x = Int(point.x)
let y = Int(point.y)
let w = self.width
let h = self.height
// Linear pixel index; assumes rows are tightly packed (no padding).
let index = w * y + x
let numBytes = CFDataGetLength(pixelData)
let numComponents = layout.count
// Reject images whose rows are padded (bytesPerRow > w * components);
// the simple index arithmetic above would be wrong for them.
if numBytes != w * h * numComponents {
NSLog("Unexpected size: \(numBytes) != \(w)x\(h)x\(numComponents)")
return .clear
}
let isAlphaPremultiplied = bitmapInfo.isAlphaPremultiplied
switch numComponents {
case 1:
// Alpha-only image: report the single byte as the alpha of black.
return UIColor(red: 0, green: 0, blue: 0, alpha: CGFloat(data[index])/255.0)
case 3:
// Three tightly packed color bytes, no alpha channel.
let c0 = CGFloat((data[3*index])) / 255
let c1 = CGFloat((data[3*index+1])) / 255
let c2 = CGFloat((data[3*index+2])) / 255
if layout == .bgr {
return UIColor(red: c2, green: c1, blue: c0, alpha: 1.0)
}
return UIColor(red: c0, green: c1, blue: c2, alpha: 1.0)
case 4:
// Four bytes per pixel; map them to RGBA per the reported layout.
let c0 = CGFloat((data[4*index])) / 255
let c1 = CGFloat((data[4*index+1])) / 255
let c2 = CGFloat((data[4*index+2])) / 255
let c3 = CGFloat((data[4*index+3])) / 255
var r: CGFloat = 0
var g: CGFloat = 0
var b: CGFloat = 0
var a: CGFloat = 0
switch layout {
case .abgr:
a = c0; b = c1; g = c2; r = c3
case .argb:
a = c0; r = c1; g = c2; b = c3
case .bgra:
b = c0; g = c1; r = c2; a = c3
case .rgba:
r = c0; g = c1; b = c2; a = c3
default:
break
}
// Un-premultiply to recover the original color components.
if isAlphaPremultiplied && a > 0 {
r = r / a
g = g / a
b = b / a
}
return UIColor(red: r, green: g, blue: b, alpha: a)
default:
return .clear
}
}
I was trying to refactor it to use ranges, but this doesn't seem to work,
let start = numComponents * index
let end = numComponents * (index + 1)
let c = data[start ..< end] // expects Int, not a Range...
isMask
property of CGImage
. –
Soppy image.size
, use cgImage.width
and cgImage.height
instead. Also, adjust the given point using image.scale
. Otherwise this code won't work with Retina images (@2x and @3x) –
Morphine Value of type 'CGBitmapInfo' has no member 'isAlphaPremultiplied'
–
Cacia isAlphaPremultiplied
for clarity. I had omitted originally because it comes from Desmond's answer. Please follow the link and check the extended definitions of CGBitmapInfo
that he proposes. –
Xebec Swift 5 version
The answers given here are either outdated or incorrect because they don't take into account the following:
- The pixel size of the image can differ from its point size that is returned by
image.size.width
/image.size.height
. - There can be various layouts used by pixel components in the image, such as BGRA, ABGR, ARGB etc. or may not have an alpha component at all, such as BGR and RGB. For example,
UIView.drawHierarchy(in:afterScreenUpdates:)
method can produce BGRA images. - Color components can be premultiplied by the alpha for all pixels in the image and need to be divided by alpha in order to restore the original color.
- For memory optimization used by
CGImage
, the size of a pixel row in bytes can be greater than the mere multiplication of the pixel width by 4.
The code below is to provide a universal Swift 5 solution to get the UIColor
of a pixel for all such special cases. The code is optimized for usability and clarity, not for performance.
/// Pixel-accurate color access for UIImage, handling the component layouts
/// BGRA/ABGR/ARGB/RGBA/BGR/RGB, premultiplied alpha, and padded rows.
public extension UIImage {
/// Width of the backing CGImage in pixels (not points); 0 when there is none.
var pixelWidth: Int {
return cgImage?.width ?? 0
}
/// Height of the backing CGImage in pixels (not points); 0 when there is none.
var pixelHeight: Int {
return cgImage?.height ?? 0
}
/// Returns the color of the pixel at (x, y), given in pixel coordinates.
/// Supports 24-bit and 32-bit RGB-model images; returns `.clear` (after an
/// assertion failure in debug builds) for anything it cannot decode.
func pixelColor(x: Int, y: Int) -> UIColor {
assert(
0 ..< pixelWidth ~= x && 0 ..< pixelHeight ~= y,
"Pixel coordinates are out of bounds"
)
guard
let cgImage = cgImage,
let data = cgImage.dataProvider?.data,
let dataPtr = CFDataGetBytePtr(data),
let colorSpaceModel = cgImage.colorSpace?.model,
let componentLayout = cgImage.bitmapInfo.componentLayout
else {
assertionFailure("Could not get a pixel of an image")
return .clear
}
assert(
colorSpaceModel == .rgb,
"The only supported color space model is RGB"
)
assert(
cgImage.bitsPerPixel == 32 || cgImage.bitsPerPixel == 24,
"A pixel is expected to be either 4 or 3 bytes in size"
)
// Index by bytesPerRow: rows may be padded beyond width * bytesPerPixel.
let bytesPerRow = cgImage.bytesPerRow
let bytesPerPixel = cgImage.bitsPerPixel / 8
let pixelOffset = y * bytesPerRow + x * bytesPerPixel
if componentLayout.count == 4 {
// 32-bit pixel: read the four raw bytes, then map to RGBA per layout.
let components = (
dataPtr[pixelOffset + 0],
dataPtr[pixelOffset + 1],
dataPtr[pixelOffset + 2],
dataPtr[pixelOffset + 3]
)
var alpha: UInt8 = 0
var red: UInt8 = 0
var green: UInt8 = 0
var blue: UInt8 = 0
switch componentLayout {
case .bgra:
alpha = components.3
red = components.2
green = components.1
blue = components.0
case .abgr:
alpha = components.0
red = components.3
green = components.2
blue = components.1
case .argb:
alpha = components.0
red = components.1
green = components.2
blue = components.3
case .rgba:
alpha = components.3
red = components.0
green = components.1
blue = components.2
default:
return .clear
}
/// Un-premultiply the color components to restore the original color.
/// When alpha is `0` the components are left at their stored values
/// (dividing by zero would be meaningless).
if cgImage.bitmapInfo.chromaIsPremultipliedByAlpha, alpha != 0 {
let invisibleUnitAlpha = 255 / CGFloat(alpha)
red = UInt8((CGFloat(red) * invisibleUnitAlpha).rounded())
green = UInt8((CGFloat(green) * invisibleUnitAlpha).rounded())
blue = UInt8((CGFloat(blue) * invisibleUnitAlpha).rounded())
}
return .init(red: red, green: green, blue: blue, alpha: alpha)
} else if componentLayout.count == 3 {
// 24-bit pixel: three color bytes, no alpha — treat as fully opaque.
let components = (
dataPtr[pixelOffset + 0],
dataPtr[pixelOffset + 1],
dataPtr[pixelOffset + 2]
)
var red: UInt8 = 0
var green: UInt8 = 0
var blue: UInt8 = 0
switch componentLayout {
case .bgr:
red = components.2
green = components.1
blue = components.0
case .rgb:
red = components.0
green = components.1
blue = components.2
default:
return .clear
}
return .init(red: red, green: green, blue: blue, alpha: UInt8(255))
} else {
assertionFailure("Unsupported number of pixel components")
return .clear
}
}
}
public extension UIColor {
    /// Creates a color from 8-bit channel values in 0...255.
    convenience init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        let scale: CGFloat = 255
        self.init(
            red: CGFloat(red) / scale,
            green: CGFloat(green) / scale,
            blue: CGFloat(blue) / scale,
            alpha: CGFloat(alpha) / scale
        )
    }
}
/// Decodes a CGBitmapInfo into the pixel component ordering it implies.
public extension CGBitmapInfo {
enum ComponentLayout {
case bgra
case abgr
case argb
case rgba
case bgr
case rgb
// Number of bytes occupied by one pixel for this layout.
var count: Int {
switch self {
case .bgr, .rgb: return 3
default: return 4
}
}
}
/// The component ordering implied by the alpha placement and byte order,
/// or nil when the alpha info cannot be decoded.
/// NOTE(review): only `.byteOrder32Little` is checked — 16-bit-per-pixel
/// little-endian formats are not distinguished here; confirm if needed.
var componentLayout: ComponentLayout? {
// Mask off everything but the CGImageAlphaInfo bits.
guard let alphaInfo = CGImageAlphaInfo(rawValue: rawValue & Self.alphaInfoMask.rawValue) else { return nil }
let isLittleEndian = contains(.byteOrder32Little)
if alphaInfo == .none {
// No alpha channel at all: 3 bytes per pixel.
return isLittleEndian ? .bgr : .rgb
}
// "First" covers premultiplied-first, first, and skip-first: the alpha
// (or padding) byte precedes the color components in memory order.
let alphaIsFirst = alphaInfo == .premultipliedFirst || alphaInfo == .first || alphaInfo == .noneSkipFirst
if isLittleEndian {
return alphaIsFirst ? .bgra : .abgr
} else {
return alphaIsFirst ? .argb : .rgba
}
}
/// True when the color components are stored premultiplied by alpha.
var chromaIsPremultipliedByAlpha: Bool {
let alphaInfo = CGImageAlphaInfo(rawValue: rawValue & Self.alphaInfoMask.rawValue)
return alphaInfo == .premultipliedFirst || alphaInfo == .premultipliedLast
}
}
You can't access the raw data directly, but by getting the CGImage of this image you can access it. here is a link to another question that answers your question and others you might have regarding detailed image manipulation : CGImage
Here's a generic method for getting pixel color in a UI image, building on Minas Petterson's answer:
// Returns the color of the pixel at (x, y) in `image`, or nil when the raw
// bitmap data cannot be read.
// NOTE(review): assumes the backing CGImage stores 4 bytes per pixel in
// RGBA order — verify against the image's bitmap info for other sources.
- (UIColor *)pixelColorInImage:(UIImage *)image atX:(int)x atY:(int)y {
    CGImageRef cgImage = image.CGImage;
    CFDataRef pixelData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
    if (pixelData == NULL) {
        return nil;
    }
    const UInt8 *data = CFDataGetBytePtr(pixelData);
    // Index with the CGImage's row stride, not image.size.width:
    // image.size is in points (wrong on @2x/@3x images) and rows may be
    // padded beyond width * 4 bytes.
    size_t pixelInfo = (size_t)y * CGImageGetBytesPerRow(cgImage) + (size_t)x * 4;
    UInt8 red   = data[pixelInfo + 0];
    UInt8 green = data[pixelInfo + 1];
    UInt8 blue  = data[pixelInfo + 2];
    UInt8 alpha = data[pixelInfo + 3];
    CFRelease(pixelData);
    return [UIColor colorWithRed:red /255.0f
                           green:green/255.0f
                            blue:blue /255.0f
                           alpha:alpha/255.0f];
}
Note that X and Y may be swapped; this function accesses the underlying bitmap directly and doesn't consider rotations that may be part of the UIImage.
// Samples a single pixel by drawing the image into a 1x1 RGBA bitmap
// context. This sidesteps assumptions about the source image's byte order
// and layout: Core Graphics converts to the format requested below.
// Returns nil when `point` lies outside the image bounds.
// NOTE(review): width/height come from image.size (points), so on @2x/@3x
// images this samples in point coordinates, not pixels — confirm intent.
- (UIColor *)colorAtPixel:(CGPoint)point inImage:(UIImage *)image {
if (!CGRectContainsPoint(CGRectMake(0.0f, 0.0f, image.size.width, image.size.height), point)) {
return nil;
}
// Create a 1x1 pixel byte array and bitmap context to draw the pixel into.
NSInteger pointX = trunc(point.x);
NSInteger pointY = trunc(point.y);
CGImageRef cgImage = image.CGImage;
NSUInteger width = image.size.width;
NSUInteger height = image.size.height;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
int bytesPerPixel = 4;
int bytesPerRow = bytesPerPixel * 1; // the context is one pixel wide
NSUInteger bitsPerComponent = 8;
unsigned char pixelData[4] = { 0, 0, 0, 0 }; // RGBA destination buffer
CGContextRef context = CGBitmapContextCreate(pixelData, 1, 1, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
// Copy blend mode writes the source pixel verbatim (no compositing).
CGContextSetBlendMode(context, kCGBlendModeCopy);
// Draw the pixel we are interested in onto the bitmap context
// The translation shifts the image so (pointX, pointY) lands on the
// context's single pixel; the Y term also flips between UIKit's top-left
// and Core Graphics' bottom-left origin.
CGContextTranslateCTM(context, -pointX, pointY-(CGFloat)height);
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, (CGFloat)width, (CGFloat)height), cgImage);
CGContextRelease(context);
// Convert color values [0..255] to floats [0.0..1.0]
CGFloat red = (CGFloat)pixelData[0] / 255.0f;
CGFloat green = (CGFloat)pixelData[1] / 255.0f;
CGFloat blue = (CGFloat)pixelData[2] / 255.0f;
CGFloat alpha = (CGFloat)pixelData[3] / 255.0f;
return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
}
Swift version of Minas answer
extension CGImage {
    /// Returns the (r, g, b, a) components (0–255) of the pixel at (x, y),
    /// or nil when the raw bitmap data is unavailable.
    /// NOTE(review): assumes 4 bytes per pixel in RGBA order — verify against
    /// `bitmapInfo`/`alphaInfo` for images from other sources.
    func pixel(x: Int, y: Int) -> (r: Int, g: Int, b: Int, a: Int)? { // swiftlint:disable:this large_tuple
        guard let pixelData = dataProvider?.data,
              let data = CFDataGetBytePtr(pixelData) else { return nil }
        // Index rows by bytesPerRow: rows can be padded past width * 4,
        // so ((width * y) + x) * 4 would read the wrong pixel there.
        let pixelInfo = y * bytesPerRow + x * 4
        let red = Int(data[pixelInfo])
        let green = Int(data[pixelInfo + 1])
        let blue = Int(data[pixelInfo + 2])
        let alpha = Int(data[pixelInfo + 3])
        return (red, green, blue, alpha)
    }
}
First of all, create and attach a tap gesture recognizer to allow user interactions:
UITapGestureRecognizer * tapRecognizer = [[UITapGestureRecognizer alloc] initWithTarget:self action:@selector(tapGesture:)];
[self.label addGestureRecognizer:tapRecognizer];
self.label.userInteractionEnabled = YES;
Now implement -tapGesture:
// Tap handler for self.label: renders the label's layer into an image
// context and inspects the tapped pixel's alpha to decide whether the tap
// hit the text glyphs or the transparent background.
- (void)tapGesture:(UITapGestureRecognizer *)recognizer
{
CGPoint point = [recognizer locationInView:self.label];
UIGraphicsBeginImageContext(self.label.bounds.size);
CGContextRef context = UIGraphicsGetCurrentContext();
[self.label.layer renderInContext:context];
int bpr = CGBitmapContextGetBytesPerRow(context);
unsigned char * data = CGBitmapContextGetData(context);
if (data != NULL)
{
// Row stride (bpr) accounts for any padding; 4 bytes per pixel.
int offset = bpr*round(point.y) + 4*round(point.x);
// NOTE(review): the component order below assumes a little-endian BGRA
// context — confirm against the context's bitmap info before reusing
// this code in another setting.
int blue = data[offset+0];
int green = data[offset+1];
int red = data[offset+2];
int alpha = data[offset+3];
NSLog(@"%d %d %d %d", alpha, red, green, blue);
if (alpha == 0)
{
// Here is tap out of text
}
else
{
// Here is tap right into text
}
}
UIGraphicsEndImageContext();
}
This will work on a UILabel with a transparent background; if this is not what you want, you can compare alpha, red, green, blue with self.label.backgroundColor
...
© 2022 - 2024 — McMap. All rights reserved.