Reputation: 573
I have a coreml model which after running, returns a VNCoreMLFeatureValueObservation object with 1 "MultiArray : Double 10 x IMG_SIZE x IMG_SIZE array"
How would I transform this into 10 UIImages each with IMG_SIZE x IMG_SIZE dimensions, and their values being a grayscale?
Upvotes: 1
Views: 1234
Reputation: 573
After snooping around a bit I found I had to add these Helper Functions:
https://github.com/hollance/CoreMLHelpers to my Xcode Project. And from a MultiArray Initialization question: https://stackoverflow.com/a/44462908/403403
I then cobbled together this solution:
// Runs the model and splits its single 10 x IMG_SIZE x IMG_SIZE multi-array
// output into ten IMG_SIZE x IMG_SIZE grayscale UIImages, one per image view.
// Uses MLMultiArray.image(offset:scale:) from CoreMLHelpers for the conversion.
let request = VNCoreMLRequest(model: model) { (request, error) in
    // Collapse the three force-unwraps into one guard: any failure here is
    // a programming/model error, so crashing with a message is intentional.
    guard let results = request.results as? [VNCoreMLFeatureValueObservation],
          let obs = results.first,
          let m = obs.featureValue.multiArrayValue else {
        fatalError("Model failed to process image")
    }
    let planeSize = IMG_SIZE * IMG_SIZE
    var mArrays = [MLMultiArray]()
    mArrays.reserveCapacity(10)
    for i in 0..<10 {
        let start = i * planeSize
        // Bug fix: shape was hard-coded to [768,768]; use IMG_SIZE so the
        // slice shape always matches the model's actual output dimensions.
        guard let plane = try? MLMultiArray(shape: [NSNumber(value: IMG_SIZE),
                                                    NSNumber(value: IMG_SIZE)],
                                            dataType: .double) else {
            fatalError("Unexpected runtime error. MLMultiArray")
        }
        // Copy the i-th IMG_SIZE*IMG_SIZE plane out of the flat buffer.
        for n in 0..<planeSize {
            plane[n] = m[start + n]
        }
        mArrays.append(plane)
    }
    // Vision invokes this completion handler on a background queue;
    // UIKit views must only be touched on the main queue.
    DispatchQueue.main.async {
        let imageViews = [self.imagePred0, self.imagePred1, self.imagePred2,
                          self.imagePred3, self.imagePred4, self.imagePred5,
                          self.imagePred6, self.imagePred7, self.imagePred8,
                          self.imagePred9]
        for (view, plane) in zip(imageViews, mArrays) {
            // offset 0 / scale 255 maps [0,1] doubles to 8-bit grayscale.
            view.image = plane.image(offset: 0, scale: 255)!
        }
    }
}
I wish there were a cleaner way, but this works for now.
Upvotes: 2