Reputation: 1
I'm getting the error
DepthwiseConv2D only support number of groups == number of input channels at the moment
when trying to load an .onnx
model with Barracuda in Unity. How do I fix my model to accommodate this?
I'm trying to use the MiDaS depth estimation model.
Here's my script:
using System.Collections;
using UnityEngine;
using Unity.Barracuda;
using UnityEngine.UI;
/// <summary>
/// Captures the main camera each frame, runs a MiDaS-style depth-estimation
/// model through Barracuda, and displays the normalized depth map on a RawImage.
/// </summary>
public class DepthEstimation : MonoBehaviour
{
    public NNModel depthModel;     // ONNX model imported as an NNModel in Unity
    public RawImage outputDisplay; // UI element to display the depth map
    public Camera mainCamera;
    public int res;                // square capture resolution in pixels
    private Model runtimeModel;
    private IWorker worker;
    public float colorMult;        // NOTE(review): currently unused by this script — confirm intent
    public Texture2D testimage;    // NOTE(review): currently unused; see bug fix below

    // Cached per-frame resources. Previously a RenderTexture and a depth-map
    // Texture2D were allocated every Update() and never freed, which leaked
    // memory each frame; these caches fix that.
    private RenderTexture captureRT;
    private Texture2D cameraImage;
    private Texture2D depthMap;

    void Start()
    {
        // Load and initialize the Barracuda model.
        runtimeModel = ModelLoader.Load(depthModel);
        worker = WorkerFactory.CreateWorker(WorkerFactory.Type.ComputePrecompiled, runtimeModel);
    }

    void Update()
    {
        // (Re)create the cached capture textures when first used or when the
        // requested resolution changes.
        if (captureRT == null || captureRT.width != res)
        {
            ReleaseCaptureResources();
            captureRT = new RenderTexture(res, res, 0);
            // BUG FIX: the original code did `Texture2D cameraImage = testimage;`
            // and then ReadPixels()-overwrote and Destroy()-ed it — destroying
            // the serialized `testimage` asset. Use a dedicated texture instead.
            cameraImage = new Texture2D(res, res, TextureFormat.RGB24, false);
        }

        // Render the camera into the RenderTexture and read it back to the CPU.
        mainCamera.targetTexture = captureRT;
        mainCamera.Render();
        RenderTexture.active = captureRT;
        cameraImage.ReadPixels(new Rect(0, 0, captureRT.width, captureRT.height), 0, 0);
        cameraImage.Apply();

        // Clear the targetTexture from the camera to avoid conflicts.
        mainCamera.targetTexture = null;
        RenderTexture.active = null;

        // `using` guarantees the input tensor is disposed even if Execute throws
        // (the original only disposed it on the success path).
        using (Tensor inputTensor = PreprocessImage(cameraImage))
        {
            worker.Execute(inputTensor);

            // Peek output tensor and process it.
            using (Tensor outputTensor = worker.PeekOutput())
            {
                // Destroy last frame's depth map before replacing it; the
                // original created a fresh Texture2D every frame and leaked it.
                if (depthMap != null) Destroy(depthMap);
                depthMap = PostProcessOutput(outputTensor);
                outputDisplay.texture = depthMap;
            }
        }
    }

    /// <summary>
    /// Converts a Texture2D into an NHWC float tensor (1, H, W, 3) with RGB
    /// values in [0, 1] as returned by GetPixels().
    /// NOTE(review): GetPixels() returns rows bottom-to-top; if the model
    /// expects top-to-bottom input the image will be vertically flipped —
    /// confirm against the model's expected orientation.
    /// </summary>
    Tensor PreprocessImage(Texture2D image)
    {
        int width = image.width;
        int height = image.height;
        float[] floatValues = new float[width * height * 3];
        Color[] pixels = image.GetPixels();
        for (int i = 0; i < pixels.Length; i++)
        {
            Color pixel = pixels[i];
            floatValues[i * 3] = pixel.r;
            floatValues[i * 3 + 1] = pixel.g;
            floatValues[i * 3 + 2] = pixel.b;
        }
        return new Tensor(1, height, width, 3, floatValues);
    }

    /// <summary>
    /// Min-max normalizes the model's single-channel output and maps it to a
    /// red→blue gradient texture for display.
    /// </summary>
    Texture2D PostProcessOutput(Tensor output)
    {
        int height = output.shape.height;
        int width = output.shape.width;
        Texture2D depthTexture = new Texture2D(width, height, TextureFormat.RGB24, false);

        // First pass: find the depth range for normalization.
        float minDepth = float.MaxValue;
        float maxDepth = float.MinValue;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                float depthValue = output[0, y, x, 0];
                if (depthValue < minDepth) minDepth = depthValue;
                if (depthValue > maxDepth) maxDepth = depthValue;
            }
        }

        // Guard against a constant depth map, which would divide by zero
        // (producing NaN pixels) in the original code.
        float range = maxDepth - minDepth;
        if (range <= 0f) range = 1f;

        // Second pass: normalize and colorize.
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                float depthValue = (output[0, y, x, 0] - minDepth) / range;
                depthTexture.SetPixel(x, y, Color.Lerp(Color.red, Color.blue, depthValue));
            }
        }

        Debug.Log($"Sample Depth Value at (0, 0): {output[0, 0, 0, 0]}");
        depthTexture.Apply();
        return depthTexture;
    }

    // Releases the cached capture RenderTexture and readback texture.
    private void ReleaseCaptureResources()
    {
        if (captureRT != null)
        {
            captureRT.Release(); // free GPU memory before destroying the object
            Destroy(captureRT);
            captureRT = null;
        }
        if (cameraImage != null)
        {
            Destroy(cameraImage);
            cameraImage = null;
        }
    }

    void OnDestroy()
    {
        worker?.Dispose();
        ReleaseCaptureResources();
        if (depthMap != null) Destroy(depthMap);
    }
}
I tried decreasing the number of groups, but that produced a shape-mismatch error instead. I expected the change to make the model load, but it only traded one error for a different, unrelated one.
Upvotes: 0
Views: 40