Reputation: 1
I am trying to get a view to position on top of an image. I have an Xcode app with an iPhone image in a UIImageView and a UIView that positions itself on top of the image to show iPhone screens. It worked in earlier releases because the image was a static size, but now I need the image to scale to fill the screen for new devices like iPhone X. I think Aspect Fit is part of the answer. I need the view in question to position properly across all iPhone devices and the simulators. I have some code that seems to work for devices, but that same code works differently in the simulators. I need the code to work the same in the simulators and devices, as I do not have all devices to test with.
How can I, either through the story board or code, position a rectangle on top of an image that is being scaled by the system? Need a universal solution that works across devices and in the simulator.
Am including rough code samples I have been experimenting with.
On the Storyboard is the Weedpaper view controller that has the Weedpaper title, "for Apple iPhone" text, the iPhone image, the UIView that I want to position properly on top of the iPhone image, "number of Weedpapers installed" text and a row of autosizing buttons along the bottom.
Need the UIView to position on top of the image which is being scaled by the system across devices and the simulator.
// Positions uiViewPhotos over the black "screen" rectangle of the scaled
// iPhone artwork. Done in viewDidAppear because the image view's frame is
// not final before layout completes.
override func viewDidAppear(_ animated: Bool) {
    // BUGFIX: lifecycle overrides must call super.
    super.viewDidAppear(animated)

    // Decide which measuring strategy to use: the Simulator reports its
    // device through the SIMULATOR_DEVICE_NAME environment variable
    // ("NoN" is a sentinel for "not found").
    if UIDevice.modelName.contains("Simulator")
    {
        deviceName = ProcessInfo.processInfo.environment["SIMULATOR_DEVICE_NAME"] ?? "NoN"
        bSimulator = true
    }
    else
    {
        deviceName = UIDevice.modelName
        bSimulator = false
    }
    print("deviceName:", deviceName)
    print("simulator:", bSimulator)

    // Compute the frame of the screen rectangle inside the scaled image.
    let frame: CGRect = bSimulator
        ? self.getImageRectForSimulators()
        : self.getImageRectForDevices()

    self.uiViewPhotos.frame = frame
    self.uiViewPhotos.isHidden = false
}
// Locates the black "screen" rectangle drawn in the iPhone artwork by
// scanning a screenshot of the image view's pixels, and returns that
// rectangle in the image view's point coordinate space.
//
// Strategy: render the view to a bitmap, then walk from the centre of the
// bitmap outwards (left, up, right, down) until a near-black pixel is hit
// on each side.
//
// Returns: the detected rectangle in points, or CGRect.zero when no black
// frame was found (the old code force-unwrapped nil CGFloat! values and
// crashed in that case).
func getImageRectForDevices() -> CGRect
{
    // Write the view to an image so we can get the positioning rectangle.
    // The positioning rectangle is a black rectangle in the image.
    let img: UIImage = self.uiImageView.asImage()
    // Write to the photo album for testing
    //UIImageWriteToSavedPhotosAlbum(img, nil, nil, nil)

    guard let cgImage = img.cgImage,
          let pixelData = cgImage.dataProvider?.data,
          let data = CFDataGetBytePtr(pixelData) else { return .zero }

    // BUGFIX: rows must be addressed with the bitmap's bytesPerRow, not
    // Int(img.size.width) * 4 — rows are frequently padded, and the pixel
    // width is size.width * scale, not size.width.
    let bytesPerRow = cgImage.bytesPerRow
    // Assumes a 4-byte RGBA/BGRA layout, which is what UIGraphicsImageRenderer
    // produces — NOTE(review): confirm if the rendering path ever changes.
    let bytesPerPixel = 4

    // BUGFIX: a single uniform point->pixel factor; the old code applied
    // img.scale * UIScreen.main.scale to Y only, skewing the scan line.
    let scale = img.scale
    let maxX = img.size.width * scale      // bitmap width in pixels
    let maxY = img.size.height * scale     // bitmap height in pixels
    let centerX = maxX / 2
    let centerY = maxY / 2

    // True when the pixel at (x, y) is close enough to black. Rendering can
    // shift channel values slightly, so exact UIColor equality (the old
    // check) is unreliable; allow a small tolerance instead.
    func isNearBlack(x: CGFloat, y: CGFloat) -> Bool {
        let offset = Int(y) * bytesPerRow + Int(x) * bytesPerPixel
        return data[offset] <= 3 && data[offset + 1] <= 3
    }

    // Walks one axis from the centre until a near-black pixel is found;
    // nil when the edge is reached without a hit.
    func scan(from start: CGFloat, to end: CGFloat, by step: CGFloat,
              point: (CGFloat) -> (x: CGFloat, y: CGFloat)) -> CGFloat? {
        for v in stride(from: start, to: end, by: step) {
            let p = point(v)
            if isNearBlack(x: p.x, y: p.y) { return v }
        }
        return nil
    }

    guard
        let left   = scan(from: centerX, to: 0,    by: -1, point: { ($0, centerY) }),
        let top    = scan(from: centerY, to: 0,    by: -1, point: { (centerX, $0) }),
        let right  = scan(from: centerX, to: maxX, by:  1, point: { ($0, centerY) }),
        let bottom = scan(from: centerY, to: maxY, by:  1, point: { (centerX, $0) })
    else {
        // No black frame detected — report an empty rect instead of crashing.
        return .zero
    }

    // Convert the pixel rectangle back to points for use as a view frame.
    let newRect = CGRect(x: left / scale,
                         y: top / scale,
                         width: (right - left) / scale,
                         height: (bottom - top) / scale)
    print("calculated rectangle:", newRect)
    return newRect
}
extension UIView {
    /// Renders the view's layer into a `UIImage`.
    ///
    /// Apple discourages `UIGraphicsBeginImageContext`: starting with iOS 10
    /// it is sRGB-only and 32-bit-only, so `UIGraphicsImageRenderer` is the
    /// preferred API where available.
    func asImage() -> UIImage
    {
        if #available(iOS 10.0, *) {
            let renderFormat = UIGraphicsImageRendererFormat.default()
            renderFormat.opaque = false
            // BUGFIX: the format was previously created but never handed to
            // the renderer, so the opaque setting had no effect.
            let renderer = UIGraphicsImageRenderer(bounds: bounds, format: renderFormat)
            return renderer.image { rendererContext in
                layer.render(in: rendererContext.cgContext)
            }
        }
        else {
            // BUGFIX: use ...WithOptions with scale 0.0 (= main screen scale)
            // so the fallback matches the renderer path; plain
            // UIGraphicsBeginImageContext always renders at scale 1.
            UIGraphicsBeginImageContextWithOptions(bounds.size, false, 0.0)
            self.layer.render(in: UIGraphicsGetCurrentContext()!)
            let image = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()
            // BUGFIX: returning UIImage(cgImage:) re-wrapped the bitmap and
            // discarded its scale; return the context image directly.
            return image ?? UIImage()
        }
    }
}
Upvotes: 0
Views: 468
Reputation: 1
Finally got it working. It seems to work across the simulators and the devices I have. The pixel comparison code I was using was not working properly; here is the link that helped me figure it out: Why do I get the wrong color of a pixel with following code?. Here is what worked for me. The iPhone image, not shown, is an iPhone image that has a black rectangle where you want the screen to be and is set to Aspect Fit.
// Finds the black "screen" rectangle in the iPhone artwork by scanning a
// screenshot of the image view from its centre outwards, and returns the
// rectangle in points plus a flag saying whether it was found.
//
// The artwork's border may not be pure black after rendering (the image
// loses detail), so the comparison uses a small tolerance instead of
// exact equality.
private func getiPhoneScreenRect() -> (rect: CGRect, bOk: Bool)
{
    // Screenshot the image view so we can inspect the scaled artwork's pixels.
    let img: UIImage = self.uiImageView.screenShotViaRenderImage()
    guard let cgImage = img.cgImage,
          let pixelData = cgImage.dataProvider?.data,
          let data = CFDataGetBytePtr(pixelData) else {
        // BUGFIX: the old failure path returned a nil implicitly-unwrapped
        // CGRect through a non-optional tuple slot, which crashed the
        // caller; return .zero instead.
        return (.zero, false)
    }

    let bytesPerRow = cgImage.bytesPerRow
    // BUGFIX: the old code named this "bytesPerPixel" but stored
    // bitsPerPixel and never used it; derive the true byte stride instead
    // of hard-coding 4 at every read site.
    let bytesPerPixel = cgImage.bitsPerPixel / 8

    let imgScale = img.scale
    let maxX = img.size.width * imgScale     // bitmap width in pixels
    let maxY = img.size.height * imgScale    // bitmap height in pixels
    let centerX = (img.size.width / 2) * imgScale
    let centerY = (img.size.height / 2) * imgScale

    // True when the pixel at (x, y) is close enough to black. Only the
    // first two channels are checked, matching the author's working code —
    // NOTE(review): this assumes the third channel tracks them for the
    // grey/black artwork; confirm against the actual image.
    func isNearBlack(x: CGFloat, y: CGFloat) -> Bool {
        let offset = Int(y) * bytesPerRow + Int(x) * bytesPerPixel
        return data[offset] <= 3 && data[offset + 1] <= 3
    }

    var v1: CGFloat = 0.0, v2: CGFloat = 0.0   // left x, top y (pixels)
    var v3: CGFloat = 0.0, v4: CGFloat = 0.0   // right x, bottom y (pixels)

    // From the centre towards the left edge: find the frame's left x.
    for i in stride(from: centerX, to: 0, by: -1) {
        if isNearBlack(x: i, y: centerY) { v1 = i; break }
    }
    // From the centre towards the top: find the frame's top y.
    for j in stride(from: centerY, to: 0, by: -1) {
        if isNearBlack(x: centerX, y: j) { v2 = j; break }
    }
    // From the centre towards the right edge: find the frame's right x.
    for k in stride(from: centerX, to: maxX - 1, by: 1) {
        if isNearBlack(x: k, y: centerY) { v3 = k; break }
    }
    // From the centre towards the bottom: find the frame's bottom y.
    for l in stride(from: centerY, to: maxY - 1, by: 1) {
        if isNearBlack(x: centerX, y: l) { v4 = l; break }
    }

    // Fail when any edge was not found, or the "frame" touches the bitmap
    // edge (which means we walked off the artwork without a hit).
    if (v1 <= 0.0 || v2 <= 0.0 || v3 <= 0.0 || v4 <= 0.0)
        || v3 >= maxX - 1 || v4 >= maxY - 1
    {
        return (.zero, false)
    }

    let w = v3 - v1
    let h = v4 - v2
    // Inset the detected frame by 2 pixels on every side so the view sits
    // just inside the black border. (The old width/height adjustment of -2
    // only inset the left/top edges, contradicting its own comment.)
    let newRect = CGRect(x: (v1 + 2) / imgScale,
                         y: (v2 + 2) / imgScale,
                         width: (w - 4) / imgScale,
                         height: (h - 4) / imgScale)
    return (newRect, true)
}
Get the screen shot like this
extension UIView {
    /// Renders the view's layer into a `UIImage` at screen scale.
    ///
    /// Apple discourages `UIGraphicsBeginImageContext`: starting with iOS 10
    /// it is sRGB-only and 32-bit-only, so `UIGraphicsImageRenderer` is used
    /// where available.
    func screenShotViaRenderImage() -> UIImage
    {
        if #available(iOS 10.0, *) {
            let rendererFormat = UIGraphicsImageRendererFormat.default()
            rendererFormat.opaque = false
            let renderer = UIGraphicsImageRenderer(bounds: bounds, format: rendererFormat)
            return renderer.image { rendererContext in
                layer.render(in: rendererContext.cgContext)
            }
        }
        else {
            // Scale 0.0 means "use the main screen's scale".
            UIGraphicsBeginImageContextWithOptions(bounds.size, false, 0.0)
            self.layer.render(in: UIGraphicsGetCurrentContext()!)
            let screenShot = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()
            // BUGFIX: re-wrapping via UIImage(cgImage:) discarded the image's
            // scale (it reverted to 1.0), which breaks the point<->pixel math
            // in callers such as getiPhoneScreenRect; return the context
            // image directly instead.
            return screenShot ?? UIImage()
        }
    }
}
then call it like this
// Snaps someScreen onto the black rectangle inside the scaled phone image
// each time layout runs, so it tracks rotation / size-class changes.
override func viewWillLayoutSubviews() {
    // BUGFIX: lifecycle overrides must call super.
    super.viewWillLayoutSubviews()

    // Different images result depending on where we call this from
    // (viewDidAppear, viewWillAppear). The screenshot loses detail, so the
    // black frame may not be totally black; getiPhoneScreenRect compensates
    // with a near-black tolerance and reports success via bOk.
    let iPhoneScreenFrame = self.getiPhoneScreenRect()
    if iPhoneScreenFrame.bOk
    {
        self.someScreen.frame = iPhoneScreenFrame.rect
    }
}
Upvotes: 0
Reputation:
For the UIImage
, make sure it is pinned to the edges (0). Scaling the image depends on your image size and dimensions. See what works best. Scale To Fill
could work.
For the UIView
, you will likely need to play around with a variety of NSLayoutConstraints
that activate and deactivate based upon the different screen sizes. NSLayoutConstraint
has a class method called activate()
that activates multiple constraints at once, which should allow Auto Layout to update its entire layout at the same time. For example:
NSLayoutConstraint.activate([
vw.leadingAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leadingAnchor, constant: 20),
vw.trailingAnchor.constraint(equalTo: view.safeAreaLayoutGuide.trailingAnchor, constant: -20),
vw.heightAnchor.constraint(equalToConstant: 100),
vw.centerYAnchor.constraint(equalTo: view.safeAreaLayoutGuide.centerYAnchor)
])
Keep in mind, that these constraints can also be deactivated:
NSLayoutConstraint.deactivate([
vw.leadingAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leadingAnchor, constant: 20),
vw.trailingAnchor.constraint(equalTo: view.safeAreaLayoutGuide.trailingAnchor, constant: -20),
vw.heightAnchor.constraint(equalToConstant: 100),
vw.centerYAnchor.constraint(equalTo: view.safeAreaLayoutGuide.centerYAnchor)
])
Upvotes: 0