Reputation: 13
I’m building an Android application using the Camera2 API with Jetpack Compose to capture images upon a button press. My goal is to display a camera preview using a TextureView, capture images in JPEG format using ImageReader, and, if supported, also capture depth data in DEPTH16 format. However, I’ve run into a problem where the code block within ImageReader.setOnImageAvailableListener() does not execute at all, which prevents me from retrieving the captured image.
My code is as follows:
package com.android.wounddet.camera
import android.*
import android.annotation.SuppressLint
import android.content.ContentValues
import android.content.Context
import android.content.pm.*
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.ImageFormat
import android.graphics.SurfaceTexture
import android.hardware.camera2.CameraCaptureSession
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraDevice
import android.hardware.camera2.CameraManager
import android.hardware.camera2.CaptureRequest
import android.media.ImageReader
import android.os.*
import android.provider.MediaStore
import android.util.Log
import android.util.Size
import android.view.*
import androidx.compose.foundation.layout.*
import androidx.compose.material3.*
import androidx.compose.runtime.*
import androidx.compose.ui.*
import androidx.compose.ui.platform.*
import androidx.compose.ui.unit.*
import androidx.compose.ui.viewinterop.AndroidView
import androidx.core.content.*
import java.lang.Exception
@Composable
fun CamPreview(
    onImageCaptured: (Bitmap) -> Unit,
    onImageDCaptured: (ByteArray) -> Unit,
    onDismiss: () -> Unit
) {
    var captureSession: CameraCaptureSession? = null
    var cameraDevice: CameraDevice? = null
    var imageReader: ImageReader? = null
    var depthImageReader: ImageReader? = null
    var handlerThread: HandlerThread? = null
    var handler: Handler? = null
    var errorMessage by remember {mutableStateOf<String?>(null)}
    var supportsDep: Boolean = false
    Box(modifier = Modifier.fillMaxSize()) {
        // Preview Window
        AndroidView(
            factory = {context ->
                TextureView(context).apply {
                    surfaceTextureListener = object: TextureView.SurfaceTextureListener {
                        override fun onSurfaceTextureAvailable(
                            surfaceTexture: SurfaceTexture,
                            width: Int,
                            height: Int
                        ) {
                            handlerThread = HandlerThread("CameraThread").apply {start()}
                            handler = Handler(handlerThread!!.looper)
                            val camManager = context.getSystemService(Context.CAMERA_SERVICE) as CameraManager
                            val camID = getDepSupportedCamID(camManager)
                            val characteristics = camID?.let {camManager.getCameraCharacteristics(it)}
                            val capabilities =
                                characteristics?.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
                            supportsDep =
                                (capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true)
                            if(supportsDep) {
                                showError("No DEPTH16 supported camera found.")
                                Log.d("CamPreview", "No DEPTH16 supported camera found.")
                                onDismiss()
                                return
                            }
                            if(camID == null) {
                                showError("No camera found.")
                                Log.d("CamPreview", "No camera found.")
                                onDismiss()
                                return
                            }
                            if(ContextCompat.checkSelfPermission(
                                    context,
                                    Manifest.permission.CAMERA
                                ) == PackageManager.PERMISSION_GRANTED
                            ) {
                                camManager.openCamera(camID, object: CameraDevice.StateCallback() {
                                    override fun onOpened(camera: CameraDevice) {
                                        cameraDevice = camera
                                        val characteristics = camID?.let {camManager.getCameraCharacteristics(it)}
                                        val map =
                                            characteristics?.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
                                        val previewSize =
                                            map?.getOutputSizes(SurfaceHolder::class.java)
                                                ?.maxByOrNull {it.width * it.height} ?: Size(640, 480)
                                        val width = previewSize.width
                                        val height = previewSize.height
                                        val surface = android.view.Surface(surfaceTexture)
                                        imageReader =
                                            ImageReader.newInstance(width, height, ImageFormat.JPEG, 5).apply {
                                                setOnImageAvailableListener({reader ->
                                                    val image = reader.acquireNextImage()
                                                    val buffer = image.planes[0].buffer
                                                    val data = ByteArray(buffer.remaining())
                                                    buffer.get(data)
                                                    image.close()
                                                    Log.d("CamPreview", "I'm here!")
                                                    val bitmap = BitmapFactory.decodeByteArray(data, 0, data.size)
                                                    onImageCaptured(bitmap)
                                                }, handler)
                                            }
                                        depthImageReader =
                                            ImageReader.newInstance(width, height, ImageFormat.DEPTH16, 5).apply {
                                                setOnImageAvailableListener({reader ->
                                                    val image = reader.acquireNextImage()
                                                    val buffer = image.planes[0].buffer
                                                    val data = ByteArray(buffer.remaining())
                                                    buffer.get(data)
                                                    onImageDCaptured(data)
                                                    image.close()
                                                }, handler)
                                            }
                                        val surfaces: List<Surface>
                                        if(supportsDep) surfaces =
                                            listOf(surface, imageReader!!.surface, depthImageReader!!.surface)
                                        else surfaces = listOf(surface, imageReader!!.surface)
                                        camera.createCaptureSession(
                                            surfaces,
                                            object: CameraCaptureSession.StateCallback() {
                                                override fun onConfigured(session: CameraCaptureSession) {
                                                    captureSession = session
                                                    session.setRepeatingRequest(
                                                        camera.createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
                                                            .apply {
                                                                addTarget(surface)
                                                            }.build(),
                                                        null,
                                                        handler
                                                    )
                                                }
                                                override fun onConfigureFailed(session: CameraCaptureSession) {
                                                    showError("Camera configuration failed.")
                                                    Log.d("CamPreview", "Camera configuration failed.")
                                                    onDismiss()
                                                }
                                            },
                                            handler
                                        )
                                    }
                                    override fun onDisconnected(camera: CameraDevice) {
                                        cameraDevice?.close()
                                    }
                                    override fun onError(camera: CameraDevice, error: Int) {
                                        showError("Camera error: $error")
                                        Log.d("CamPreview", "Camera error: $error")
                                        onDismiss()
                                    }
                                }, handler)
                            }
                        }
                        fun getDepSupportedCamID(camManager: CameraManager): String? {
                            for(cameraId in camManager.cameraIdList) {
                                val characteristics = camManager.getCameraCharacteristics(cameraId)
                                // Check the camera's facing direction
                                val facing = characteristics.get(CameraCharacteristics.LENS_FACING)
                                // Check if the device supports DEPTH16 format
                                val capabilities =
                                    characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
                                if(capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true) {
                                    // Prefer rear-facing camera
                                    if(facing == CameraCharacteristics.LENS_FACING_BACK) {
                                        return cameraId
                                    }
                                }
                            }
                            // If no rear-facing camera found, return another camera ID that supports DEPTH16
                            for(cameraId in camManager.cameraIdList) {
                                val characteristics = camManager.getCameraCharacteristics(cameraId)
                                val capabilities =
                                    characteristics.get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
                                if(capabilities?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true) {
                                    return cameraId
                                }
                            }
                            // If no camera supports DEPTH16, return rear-facing camera ID
                            for(cameraId in camManager.cameraIdList) {
                                val characteristics = camManager.getCameraCharacteristics(cameraId)
                                val facing = characteristics.get(CameraCharacteristics.LENS_FACING)
                                if(facing == CameraCharacteristics.LENS_FACING_BACK) {
                                    return cameraId
                                }
                            }
                            return null
                        }
                        fun showError(message: String) {
                            errorMessage = message
                        }
                        override fun onSurfaceTextureSizeChanged(surface: SurfaceTexture, width: Int, height: Int) {}
                        override fun onSurfaceTextureUpdated(surface: SurfaceTexture) {}
                        override fun onSurfaceTextureDestroyed(surface: SurfaceTexture): Boolean {
                            return true
                        }
                    }
                }
            },
            modifier = Modifier.fillMaxSize()
        )
        errorMessage?.let {message ->
            AlertDialog(
                onDismissRequest = {errorMessage = null;},
                title = {Text("Tip")},
                text = {Text(message)},
                confirmButton = {
                    Button(onClick = {errorMessage = null;}) {Text("OK")}
                }
            )
        }
        Button(
            onClick = {
                cameraDevice?.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE)?.apply {
                    addTarget(imageReader!!.surface)
                    if(supportsDep) addTarget(depthImageReader!!.surface)
                }?.build()?.let {
                    captureSession?.capture(it, null, handler)
                }
                onDismiss()
            },
            modifier = Modifier
                .align(Alignment.BottomCenter)
                .padding(16.dp)
        ) {
            Text("Capture")
        }
    }
    // Releasing Resources
    DisposableEffect(Unit) {
        onDispose {
            captureSession?.close()
            cameraDevice?.close()
            imageReader?.close()
            depthImageReader?.close()
            handlerThread?.quitSafely()
        }
    }
}
I expected the listener registered with ImageReader.setOnImageAvailableListener() to run after each capture request, allowing me to retrieve and process the image. The camera preview works fine, but nothing is ever captured. I added logging in the important places, and it shows that everything runs as expected except the setOnImageAvailableListener() callback.
What could be causing ImageReader.setOnImageAvailableListener() to skip executing its code block, and what steps can I take to troubleshoot this issue further? Is there something specific to Camera2’s CaptureRequest or ImageReader setup that I may have overlooked?
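For example, would attaching a CameraCaptureSession.CaptureCallback to the still-capture request, roughly like the sketch below, be a sensible way to check whether the capture itself completes or fails? This is only an illustration; it reuses the cameraDevice, imageReader, captureSession and handler variables from the code above.

// Additional imports beyond the ones above:
import android.hardware.camera2.CaptureFailure
import android.hardware.camera2.TotalCaptureResult

// Sketch only: a capture callback that logs whether the still capture
// completes or fails, instead of passing null as in the Button onClick above.
val stillCaptureCallback = object : CameraCaptureSession.CaptureCallback() {
    override fun onCaptureCompleted(
        session: CameraCaptureSession,
        request: CaptureRequest,
        result: TotalCaptureResult
    ) {
        Log.d("CamPreview", "Still capture completed")
    }

    override fun onCaptureFailed(
        session: CameraCaptureSession,
        request: CaptureRequest,
        failure: CaptureFailure
    ) {
        Log.d("CamPreview", "Still capture failed, reason=${failure.reason}")
    }
}

cameraDevice?.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE)?.apply {
    addTarget(imageReader!!.surface)
}?.build()?.let { request ->
    captureSession?.capture(request, stillCaptureCallback, handler)
}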
By the way, I've tried running this app on several different phones that I'm fairly sure should support depth output, but none of them reported support for DEPTH16 images. I don't know whether these phones genuinely don't support this format, or whether there's something wrong with the way I'm requesting it.
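Is a check along the lines of the sketch below enough to tell, or do I need to query something else? logDepthSupport is just an illustrative name; it logs what each camera reports, including any DEPTH16 output sizes from its stream configuration map.

// Sketch: log, for every camera, whether it advertises DEPTH_OUTPUT and which
// DEPTH16 output sizes (if any) its stream configuration map exposes.
fun logDepthSupport(camManager: CameraManager) {
    for (cameraId in camManager.cameraIdList) {
        val characteristics = camManager.getCameraCharacteristics(cameraId)
        val hasDepthCapability = characteristics
            .get(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES)
            ?.contains(CameraCharacteristics.REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT) == true
        // getOutputSizes() returns null when the format is not a supported output
        val depthSizes = characteristics
            .get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP)
            ?.getOutputSizes(ImageFormat.DEPTH16)
            ?.joinToString()
        Log.d("CamPreview", "camera $cameraId: DEPTH_OUTPUT=$hasDepthCapability, DEPTH16 sizes=$depthSizes")
    }
}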
Thank you for your help!
Replying to comment of dev.bmax: I tried changing my code to a newer version, but it still doesn't work. Would you mind giving some more detailed suggestions? Thanks.
Update: I tried to do this using another method, but ran into the same problem. The code and the question above have been updated to the newer version. Please help.
Upvotes: 0
Views: 101
Reputation: 475
It looks like you are not adding the other output streams when configuring your camera session. You can try something like this:
camDevice.createCaptureSession( listOf(previewSurface, imgReader.getSurface()... ) ...
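Fleshed out a bit, still just a sketch: previewSurface, imageReader, depthImageReader, cameraDevice and handler stand in for the corresponding objects in your code, and supportsDepth for the flag you compute from REQUEST_AVAILABLE_CAPABILITIES.

// Sketch: register every surface you intend to capture into when the session
// is created, then target only the ones you need in each request.
val outputSurfaces = mutableListOf(previewSurface, imageReader.surface)
if (supportsDepth) {
    outputSurfaces.add(depthImageReader.surface)
}

cameraDevice.createCaptureSession(
    outputSurfaces,
    object : CameraCaptureSession.StateCallback() {
        override fun onConfigured(session: CameraCaptureSession) {
            // The repeating preview request only targets the preview surface;
            // the later still-capture request targets the ImageReader surfaces.
            val previewRequest = cameraDevice
                .createCaptureRequest(CameraDevice.TEMPLATE_PREVIEW)
                .apply { addTarget(previewSurface) }
                .build()
            session.setRepeatingRequest(previewRequest, null, handler)
        }

        override fun onConfigureFailed(session: CameraCaptureSession) {
            Log.e("Camera", "Session configuration failed")
        }
    },
    handler
)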
Upvotes: 0