Reputation: 171
I am reading each frame of video and adding time stamp to it as given below.
command = ['ffmpeg',
           '-y',                # (optional) overwrite output file if it exists
           '-f', 'rawvideo',    # Input is raw video
           '-pix_fmt', 'bgr24', # Raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',           # The input comes from a pipe
           '-an',               # Tells FFMPEG not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',       # Sets a maximum bit rate
           Output_name]
#Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)
print('Processing....')
print(' ')
#Reads through each frame, calculates the timestamp, places it on the frame and exports the frame to the output video.
#import pdb
#pdb.set_trace()
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())
        print('frame number', current_frame)
    else:
        print('video reader fail')
video.release()
pipe.stdin.close()
pipe.stderr.close()
However, after around 18k frames, Python gets stuck at pipe.stdin.write(image.tostring()). It does not produce any error; it simply hangs. How can I resolve this issue?
Thanks in advance.
Upvotes: 3
Views: 1694
Reputation: 32124
I think I solved the puzzle:
The stderr buffer fills up and the process gets stuck.
I managed to reproduce the problem under Windows 10.
FFmpeg writes messages to stderr from time to time. The process is opened with stderr=sp.PIPE, but the code never reads from stderr, so the stderr buffer fills up and the process gets stuck. You may either remove stderr=sp.PIPE, or make sure to read the data from stderr.
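If you do not need FFmpeg's log output at all, the first option is the simplest. Here is a minimal sketch of both variants, assuming the same command list as in the question (sp.DEVNULL requires Python 3.3 or newer):

# Option 1: do not capture stderr at all; FFmpeg's log goes to the console.
pipe = sp.Popen(command, stdin=sp.PIPE)

# Option 2: silently discard FFmpeg's log, so no pipe buffer can ever fill up.
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.DEVNULL)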
Reading data from stderr may be performed using a Thread:
# Read from pipe.stderr for "draining the pipe"
def drain_stderr():
    while True:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass
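As a side note (my addition, not part of the original answer): readline() returns an empty bytes object once FFmpeg closes its side of the pipe, so the drain loop can also stop on its own without an external flag. A minimal sketch under that assumption:

def drain_stderr():
    # Consume FFmpeg's log lines until the stream reaches EOF (readline() returns b'').
    for _ in iter(pipe.stderr.readline, b''):
        pass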
I created a "self-contained" code sample that generates a synthetic video file and then runs the code using the synthetic video as input.
Here is the testing code sample:
import numpy as np
import cv2
import subprocess as sp
import threading
import datetime as dt
# Generate synthetic video file - resolution 640x480, 30000 frames, 1 fps
# H.264 encoded video (for testing):
#########################################################################
input_name = 'test.mp4'
width, height = 640, 480
total_frames = 30000
sp.run('ffmpeg -y -f lavfi -i testsrc=size={}x{}:rate=1 -vcodec libx264 -crf 23 -t {} {}'.format(width, height, total_frames, input_name))
#########################################################################
# Read from pipe.stderr for "draining the pipe"
def drain_stderr():
    while keep_drain_stderr:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass
Output_name = 'out.mp4'
command = ['ffmpeg',
           '-y',                # (optional) overwrite output file if it exists
           '-f', 'rawvideo',    # Input is raw video
           '-pix_fmt', 'bgr24', # Raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',           # The input comes from a pipe
           '-an',               # Tells FFMPEG not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',       # Sets a maximum bit rate
           Output_name]
# Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)
keep_drain_stderr = True
thread = threading.Thread(target=drain_stderr)
thread.start()
# Open video file for reading
video = cv2.VideoCapture(input_name)
print('Processing....')
print(' ')
#Reads through each frame, calculates the timestamp, places it on the frame and exports the frame to the output video.
#import pdb
#pdb.set_trace()
initial = dt.timedelta(microseconds=0*1000)
current_frame = 0
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())
        print('frame number', current_frame)
    else:
        print('video reader fail')
keep_drain_stderr = False
video.release()
pipe.stdin.close()
pipe.stderr.close()
#Wait 3 seconds before killing FFmpeg
try:
    pipe.wait(3)
except (sp.TimeoutExpired):
    pipe.kill()
thread.join()
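One last note (my addition): recent NumPy versions deprecate ndarray.tostring() in favour of ndarray.tobytes(), which returns the same raw buffer, so the write call may be updated as follows:

pipe.stdin.write(image.tobytes())  # identical bytes to image.tostring(), without the deprecation warning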
Upvotes: 3