import os
import sys
import re
import glob
import numpy as np
from pathlib import Path
from skimage import io
from skimage.util import img_as_ubyte
from deeplabcut.utils import frameselectiontools
from deeplabcut.utils import auxiliaryfunctions
config_file = Path(config).resolve()
cfg = auxiliaryfunctions.read_config(config_file)
print("Config file read successfully.")
if videos_list is None:
videos = cfg.get("video_sets_original") or cfg["video_sets"]
else: # filter video_list by the ones in the config file
videos = [v for v in cfg["video_sets"] if v in videos_list]
if mode == "manual":
from deeplabcut.gui.widgets import launch_napari
_ = launch_napari(videos[0])
return
elif mode == "automatic":
numframes2pick = cfg["numframes2pick"]
start = cfg["start"]
stop = cfg["stop"]
# Check for variable correctness
if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
raise Exception(
"Erroneous start or stop values. Please correct it in the config file."
)
if numframes2pick < 1 and not int(numframes2pick):
raise Exception(
"Perhaps consider extracting more, or a natural number of frames."
)
if opencv:
from deeplabcut.utils.auxfun_videos import VideoWriter
else:
from moviepy.editor import VideoFileClip
has_failed = []
for video in videos:
if userfeedback:
print(
"Do you want to extract (perhaps additional) frames for video:",
video,
"?",
)
askuser = input("yes/no")
else:
askuser = "yes"
if (
askuser == "y"
or askuser == "yes"
or askuser == "Ja"
or askuser == "ha"
or askuser == "oui"
or askuser == "ouais"
): # multilanguage support :)
if opencv:
cap = VideoWriter(video)
nframes = len(cap)
else:
# Moviepy:
clip = VideoFileClip(video)
fps = clip.fps
nframes = int(np.ceil(clip.duration * 1.0 / fps))
if not nframes:
print("Video could not be opened. Skipping...")
continue
indexlength = int(np.ceil(np.log10(nframes)))
fname = Path(video)
output_path = Path(config).parents[0] / "labeled-data" / fname.stem
if output_path.exists():
if len(os.listdir(output_path)):
if userfeedback:
askuser = input(
"The directory already contains some frames. Do you want to add to it?(yes/no): "
)
if not (
askuser == "y"
or askuser == "yes"
or askuser == "Y"
or askuser == "Yes"
):
sys.exit("Delete the frames and try again later!")
if crop == "GUI":
cfg = select_cropping_area(config, [video])
try:
coords = cfg["video_sets"][video]["crop"].split(",")
except KeyError:
coords = cfg["video_sets_original"][video]["crop"].split(",")
if crop:
if opencv:
cap.set_bbox(*map(int, coords))
else:
clip = clip.crop(
y1=int(coords[2]),
y2=int(coords[3]),
x1=int(coords[0]),
x2=int(coords[1]),
)
else:
coords = None
print("Extracting frames based on %s ..." % algo)
if algo == "uniform":
if opencv:
frames2pick = frameselectiontools.UniformFramescv2(
cap, numframes2pick, start, stop
)
else:
frames2pick = frameselectiontools.UniformFrames(
clip, numframes2pick, start, stop
)
elif algo == "kmeans":
if opencv:
frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
cap,
numframes2pick,
start,
stop,
step=cluster_step,
resizewidth=cluster_resizewidth,
color=cluster_color,
)
else:
frames2pick = frameselectiontools.KmeansbasedFrameselection(
clip,
numframes2pick,
start,
stop,
step=cluster_step,
resizewidth=cluster_resizewidth,
color=cluster_color,
)
else:
print(
"Please implement this method yourself and send us a pull request! Otherwise, choose 'uniform' or 'kmeans'."
)
frames2pick = []
if not len(frames2pick):
print("Frame selection failed...")
return
output_path = (
Path(config).parents[0] / "labeled-data" / Path(video).stem
)
output_path.mkdir(parents=True, exist_ok=True)
is_valid = []
if opencv:
for index in frames2pick:
cap.set_to_frame(index) # extract a particular frame
frame = cap.read_frame(crop=True)
if frame is not None:
image = img_as_ubyte(frame)
img_name = (
str(output_path)
+ "/img"
+ str(index).zfill(indexlength)
+ ".png"
)
io.imsave(img_name, image)
is_valid.append(True)
else:
print("Frame", index, " not found!")
is_valid.append(False)
cap.close()
else:
for index in frames2pick:
try:
image = img_as_ubyte(clip.get_frame(index * 1.0 / clip.fps))
img_name = (
str(output_path)
+ "/img"
+ str(index).zfill(indexlength)
+ ".png"
)
io.imsave(img_name, image)
if np.var(image) == 0: # constant image
print(
"Seems like black/constant images are extracted from your video. Perhaps consider using opencv under the hood, by setting: opencv=True"
)
is_valid.append(True)
except FileNotFoundError:
print("Frame # ", index, " does not exist.")
is_valid.append(False)
clip.close()
del clip
if not any(is_valid):
has_failed.append(True)
else:
has_failed.append(False)
else: # NO!
has_failed.append(False)
if all(has_failed):
print("Frame extraction failed. Video files must be corrupted.")
return
elif any(has_failed):
print("Although most frames were extracted, some were invalid.")
else:
print(
"Frames were successfully extracted, for the videos listed in the config.yaml file."
)
print(
"\nYou can now label the frames using the function 'label_frames' "
"(Note, you should label frames extracted from diverse videos (and many videos; we do not recommend training on single videos!))."
)
elif mode == "match":
import cv2
config_file = Path(config).resolve()
cfg = auxiliaryfunctions.read_config(config_file)
print("Config file read successfully.")
videos = sorted(cfg["video_sets"].keys())
if videos_list is not None: # filter video_list by the ones in the config file
videos = [v for v in videos if v in videos_list]
project_path = Path(config).parents[0]
labels_path = os.path.join(project_path, "labeled-data/")
video_dir = os.path.join(project_path, "videos/")
try:
cfg_3d = auxiliaryfunctions.read_config(config3d)
except:
raise Exception(
"You must create a 3D project and edit the 3D config file before extracting matched frames. \n"
)
cams = cfg_3d["camera_names"]
extCam_name = cams[extracted_cam]
del cams[extracted_cam]
label_dirs = sorted(
glob.glob(os.path.join(labels_path, "*" + extCam_name + "*"))
)
# select crop method
crop_list = []
for video in videos:
if extCam_name in video:
if crop == "GUI":
cfg = select_cropping_area(config, [video])
print("in gui code")
coords = cfg["video_sets"][video]["crop"].split(",")
if crop and not opencv:
clip = clip.crop(
y1=int(coords[2]),
y2=int(coords[3]),
x1=int(coords[0]),
x2=int(coords[1]),
)
elif not crop:
coords = None
crop_list.append(coords)
for coords, dirPath in zip(crop_list, label_dirs):
extracted_images = glob.glob(os.path.join(dirPath, "*png"))
imgPattern = re.compile("[0-9]{1,10}")
for cam in cams:
output_path = re.sub(extCam_name, cam, dirPath)
for fname in os.listdir(output_path):
if fname.endswith(".png"):
os.remove(os.path.join(output_path, fname))
# Find the matching video from the config `video_sets`,
# as it may be stored elsewhere than in the `videos` directory.
video_name = os.path.basename(output_path)
vid = ""
for video in cfg["video_sets"]:
if video_name in video:
vid = video
break
if not vid:
raise ValueError(f"Video {video_name} not found...")
cap = cv2.VideoCapture(vid)
print("\n extracting matched frames from " + video_name)
for img in extracted_images:
imgNum = re.findall(imgPattern, os.path.basename(img))[0]
cap.set(1, int(imgNum))
ret, frame = cap.read()
if ret:
image = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
img_name = os.path.join(output_path, "img" + imgNum + ".png")
if crop:
io.imsave(
img_name,
image[
int(coords[2]) : int(coords[3]),
int(coords[0]) : int(coords[1]),
:,
],
)
else:
io.imsave(img_name, image)
print(
"\n Done extracting matched frames. You can now begin labeling frames using the function label_frames\n"
)
else:
print(
"Invalid MODE. Choose either 'manual', 'automatic' or 'match'. Check ``help(deeplabcut.extract_frames)`` on python and ``deeplabcut.extract_frames?`` \
for ipython/jupyter notebook for more details."
)
# extract_frames
# (scraped-page footer removed: "latest recommended article published
# 2024-06-07 10:49:04" — CSDN blog metadata, not part of the code)