Add support for videos using ffmpeg for transcoding to webm format and HTML5 video tag.

This commit is contained in:
Jerome Charaoui 2013-12-20 22:40:25 -05:00
parent 9c8beb0cc5
commit 6f482f8f78
5 changed files with 194 additions and 51 deletions

View File

@ -6,6 +6,8 @@ import os.path
from PIL import Image from PIL import Image
from PIL.ExifTags import TAGS from PIL.ExifTags import TAGS
import gc import gc
import tempfile
import subprocess
class Album(object): class Album(object):
def __init__(self, path): def __init__(self, path):
@ -108,6 +110,7 @@ class Photo(object):
def __init__(self, path, thumb_path=None, attributes=None): def __init__(self, path, thumb_path=None, attributes=None):
self._path = trim_base(path) self._path = trim_base(path)
self.is_valid = True self.is_valid = True
image = None
try: try:
mtime = file_mtime(path) mtime = file_mtime(path)
except KeyboardInterrupt: except KeyboardInterrupt:
@ -120,17 +123,26 @@ class Photo(object):
return return
self._attributes = {} self._attributes = {}
self._attributes["dateTimeFile"] = mtime self._attributes["dateTimeFile"] = mtime
self._attributes["mediaType"] = "photo"
try: try:
image = Image.open(path) image = Image.open(path)
except KeyboardInterrupt: except KeyboardInterrupt:
raise raise
except: except:
self._video_metadata(path)
if isinstance(image, Image.Image):
self._photo_metadata(image)
self._photo_thumbnails(image, thumb_path, path)
elif self._attributes["mediaType"] == "video":
self._video_metadata(path)
self._video_thumbnails(thumb_path, path)
self._video_transcode(thumb_path, path)
else:
self.is_valid = False self.is_valid = False
return
self._metadata(image) def _photo_metadata(self, image):
self._thumbnails(image, thumb_path, path)
def _metadata(self, image):
self._attributes["size"] = image.size self._attributes["size"] = image.size
self._orientation = 1 self._orientation = 1
try: try:
@ -160,8 +172,8 @@ class Photo(object):
self._orientation = exif["Orientation"]; self._orientation = exif["Orientation"];
if self._orientation in range(5, 9): if self._orientation in range(5, 9):
self._attributes["size"] = (self._attributes["size"][1], self._attributes["size"][0]) self._attributes["size"] = (self._attributes["size"][1], self._attributes["size"][0])
if self._orientation - 1 < len(self._metadata.orientation_list): if self._orientation - 1 < len(self._photo_metadata.orientation_list):
self._attributes["orientation"] = self._metadata.orientation_list[self._orientation - 1] self._attributes["orientation"] = self._photo_metadata.orientation_list[self._orientation - 1]
if "Make" in exif: if "Make" in exif:
self._attributes["make"] = exif["Make"] self._attributes["make"] = exif["Make"]
if "Model" in exif: if "Model" in exif:
@ -180,32 +192,32 @@ class Photo(object):
self._attributes["iso"] = exif["PhotographicSensitivity"] self._attributes["iso"] = exif["PhotographicSensitivity"]
if "ExposureTime" in exif: if "ExposureTime" in exif:
self._attributes["exposureTime"] = exif["ExposureTime"] self._attributes["exposureTime"] = exif["ExposureTime"]
if "Flash" in exif and exif["Flash"] in self._metadata.flash_dictionary: if "Flash" in exif and exif["Flash"] in self._photo_metadata.flash_dictionary:
try: try:
self._attributes["flash"] = self._metadata.flash_dictionary[exif["Flash"]] self._attributes["flash"] = self._photo_metadata.flash_dictionary[exif["Flash"]]
except KeyboardInterrupt: except KeyboardInterrupt:
raise raise
except: except:
pass pass
if "LightSource" in exif and exif["LightSource"] in self._metadata.light_source_dictionary: if "LightSource" in exif and exif["LightSource"] in self._photo_metadata.light_source_dictionary:
try: try:
self._attributes["lightSource"] = self._metadata.light_source_dictionary[exif["LightSource"]] self._attributes["lightSource"] = self._photo_metadata.light_source_dictionary[exif["LightSource"]]
except KeyboardInterrupt: except KeyboardInterrupt:
raise raise
except: except:
pass pass
if "ExposureProgram" in exif and exif["ExposureProgram"] < len(self._metadata.exposure_list): if "ExposureProgram" in exif and exif["ExposureProgram"] < len(self._photo_metadata.exposure_list):
self._attributes["exposureProgram"] = self._metadata.exposure_list[exif["ExposureProgram"]] self._attributes["exposureProgram"] = self._photo_metadata.exposure_list[exif["ExposureProgram"]]
if "SpectralSensitivity" in exif: if "SpectralSensitivity" in exif:
self._attributes["spectralSensitivity"] = exif["SpectralSensitivity"] self._attributes["spectralSensitivity"] = exif["SpectralSensitivity"]
if "MeteringMode" in exif and exif["MeteringMode"] < len(self._metadata.metering_list): if "MeteringMode" in exif and exif["MeteringMode"] < len(self._photo_metadata.metering_list):
self._attributes["meteringMode"] = self._metadata.metering_list[exif["MeteringMode"]] self._attributes["meteringMode"] = self._photo_metadata.metering_list[exif["MeteringMode"]]
if "SensingMethod" in exif and exif["SensingMethod"] < len(self._metadata.sensing_method_list): if "SensingMethod" in exif and exif["SensingMethod"] < len(self._photo_metadata.sensing_method_list):
self._attributes["sensingMethod"] = self._metadata.sensing_method_list[exif["SensingMethod"]] self._attributes["sensingMethod"] = self._photo_metadata.sensing_method_list[exif["SensingMethod"]]
if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._metadata.scene_capture_type_list): if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._photo_metadata.scene_capture_type_list):
self._attributes["sceneCaptureType"] = self._metadata.scene_capture_type_list[exif["SceneCaptureType"]] self._attributes["sceneCaptureType"] = self._photo_metadata.scene_capture_type_list[exif["SceneCaptureType"]]
if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._metadata.subject_distance_range_list): if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._photo_metadata.subject_distance_range_list):
self._attributes["subjectDistanceRange"] = self._metadata.subject_distance_range_list[exif["SubjectDistanceRange"]] self._attributes["subjectDistanceRange"] = self._photo_metadata.subject_distance_range_list[exif["SubjectDistanceRange"]]
if "ExposureCompensation" in exif: if "ExposureCompensation" in exif:
self._attributes["exposureCompensation"] = exif["ExposureCompensation"] self._attributes["exposureCompensation"] = exif["ExposureCompensation"]
if "ExposureBiasValue" in exif: if "ExposureBiasValue" in exif:
@ -215,14 +227,37 @@ class Photo(object):
if "DateTime" in exif: if "DateTime" in exif:
self._attributes["dateTime"] = exif["DateTime"] self._attributes["dateTime"] = exif["DateTime"]
_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"} _photo_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"}
_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"} _photo_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"}
_metadata.metering_list = ["Unknown", "Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"] _photo_metadata.metering_list = ["Unknown", "Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"]
_metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"] _photo_metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"]
_metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"] _photo_metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"]
_metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"] _photo_metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"]
_metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"] _photo_metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"]
_metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"] _photo_metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"]
def _video_metadata(self, path, original=True):
try:
p = subprocess.check_output(['/usr/bin/ffprobe', '-show_format', '-show_streams', '-of', 'json', '-loglevel', '0', path])
except KeyboardInterrupt:
raise
except:
return
info = json.loads(p)
for s in info["streams"]:
if 'codec_type' in s and s['codec_type'] == 'video':
if original:
self._attributes["mediaType"] = "video"
self._attributes["originalSize"] = (int(s["width"]), int(s["height"]))
self._attributes["duration"] = s["duration"]
if "tags" in s:
# creation_time only in UTC!
# https://code.google.com/p/android/issues/detail?id=60225#c6
#self._attributes["dateTime"] = s["tags"]["creation_time"]
if "rotate" in s["tags"]:
self._attributes["rotate"] = s["tags"]["rotate"]
else:
self._attributes["size"] = (int(s["width"]), int(s["height"]))
def _thumbnail(self, image, thumb_path, original_path, size, square=False): def _thumbnail(self, image, thumb_path, original_path, size, square=False):
thumb_path = os.path.join(thumb_path, image_cache(self._path, size, square)) thumb_path = os.path.join(thumb_path, image_cache(self._path, size, square))
@ -268,7 +303,7 @@ class Photo(object):
message("save failure", os.path.basename(thumb_path)) message("save failure", os.path.basename(thumb_path))
os.unlink(thumb_path) os.unlink(thumb_path)
def _thumbnails(self, image, thumb_path, original_path): def _photo_thumbnails(self, image, thumb_path, original_path):
mirror = image mirror = image
if self._orientation == 2: if self._orientation == 2:
# Vertical Mirror # Vertical Mirror
@ -293,6 +328,73 @@ class Photo(object):
mirror = image.transpose(Image.ROTATE_90) mirror = image.transpose(Image.ROTATE_90)
for size in Photo.thumb_sizes: for size in Photo.thumb_sizes:
self._thumbnail(mirror, thumb_path, original_path, size[0], size[1]) self._thumbnail(mirror, thumb_path, original_path, size[0], size[1])
def _video_thumbnails(self, thumb_path, original_path):
	"""Grab one frame from the video and build its square thumbnails.

	A single frame is extracted with ffmpeg into a temporary file,
	counter-rotated according to the stream's "rotate" tag, then fed
	through the same _thumbnail() pipeline as photos (square sizes only).
	"""
	(tfd, tfn) = tempfile.mkstemp()
	# mkstemp returns an *open* descriptor; close it immediately and use
	# only the path -- otherwise every processed video leaks a descriptor.
	os.close(tfd)
	try:
		subprocess.check_call(
			['/usr/bin/ffmpeg', '-i', original_path, '-f', 'image2',
			 '-vsync', '1', '-vframes', '1', '-an',
			 '-loglevel', 'quiet', tfn])
	except KeyboardInterrupt:
		os.unlink(tfn)
		raise
	except:
		message("couldn't extract video frame", os.path.basename(original_path))
		os.unlink(tfn)
		return
	try:
		image = Image.open(tfn)
	except KeyboardInterrupt:
		raise
	except:
		message("couldn't open video thumbnail", tfn)
		os.unlink(tfn)
		return
	mirror = image
	# Counter-rotate the extracted frame so thumbnails come out upright.
	if "rotate" in self._attributes:
		if self._attributes["rotate"] == "90":
			mirror = image.transpose(Image.ROTATE_270)
		elif self._attributes["rotate"] == "180":
			mirror = image.transpose(Image.ROTATE_180)
		elif self._attributes["rotate"] == "270":
			mirror = image.transpose(Image.ROTATE_90)
	for size in Photo.thumb_sizes:
		if size[1]:  # videos only get the square thumbnail sizes
			self._thumbnail(mirror, thumb_path, original_path, size[0], size[1])
	os.unlink(tfn)
def _video_transcode(self, transcode_path, original_path):
	"""Transcode the video to a cached VP8/Vorbis .webm file.

	Skips work when an up-to-date transcode already exists.  Output is
	capped at 720 lines (even dimensions, aspect preserved), counter-
	rotated per the "rotate" tag, and re-probed afterwards so "size"
	reflects the transcoded file rather than the original.
	"""
	transcode_path = os.path.join(transcode_path, cache_base(self._path) + '.webm')
	# Freshness check comes first so the "transcoding" message below is
	# only printed when work is actually done.
	if os.path.exists(transcode_path) and file_mtime(transcode_path) >= self._attributes["dateTimeFile"]:
		return
	message("transcoding", "%s -> webm" % (os.path.basename(original_path)))
	transcode_cmd = ['/usr/bin/ffmpeg', '-i', original_path,
			 '-c:v', 'libvpx', '-crf', '10', '-b:v', '800k',
			 '-c:a', 'libvorbis', '-f', 'webm',
			 '-threads', '2', '-loglevel', '0', '-y']
	filters = []
	# Downscale anything taller than 720 lines; trunc(...)*2 keeps the
	# dimensions even, which libvpx requires.
	if "originalSize" in self._attributes and self._attributes["originalSize"][1] > 720:
		filters.append("scale=trunc(oh*a/2)*2:min(720\,iw)")
	if "rotate" in self._attributes:
		if self._attributes["rotate"] == "90":
			filters.append('transpose=1')
		elif self._attributes["rotate"] == "180":
			filters.append('vflip,hflip')
		elif self._attributes["rotate"] == "270":
			filters.append('transpose=2')
	if len(filters):
		transcode_cmd.append('-vf')
		transcode_cmd.append(','.join(filters))
	transcode_cmd.append(transcode_path)
	try:
		# check_call (not call): a non-zero ffmpeg exit must count as a
		# failure, or a partial .webm would pass the mtime check forever.
		subprocess.check_call(transcode_cmd)
	except KeyboardInterrupt:
		raise
	except:
		message("transcoding failure", os.path.basename(original_path))
		try:
			os.unlink(transcode_path)
		except:
			pass
		return
	# Record the dimensions of the transcoded (possibly scaled) file.
	self._video_metadata(transcode_path, False)
@property @property
def name(self): def name(self):
return os.path.basename(self._path) return os.path.basename(self._path)
@ -303,7 +405,15 @@ class Photo(object):
return self._path return self._path
@property @property
def image_caches(self): def image_caches(self):
return [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes] caches = []
if "mediaType" in self._attributes and self._attributes["mediaType"] == "video":
for size in Photo.thumb_sizes:
if size[1]:
caches.append(image_cache(self._path, size[0], size[1]))
caches.append(cache_base(self._path) + '.webm')
else:
caches = [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes]
return caches
@property @property
def date(self): def date(self):
if not self.is_valid: if not self.is_valid:

View File

@ -91,9 +91,14 @@ a:hover {
right: 0; right: 0;
text-align: center; text-align: center;
} }
#photo-box { #photo-box, #video-box {
display: inline; display: inline;
} }
#video-box-inner {
position: absolute;
top: 50%;
width: 100%;
}
#photo-links { #photo-links {
background-color: #000000; background-color: #000000;
font-weight: bold; font-weight: bold;

View File

@ -21,6 +21,11 @@
<div id="metadata"></div> <div id="metadata"></div>
</div> </div>
</div> </div>
<div id="video-box">
<div id="video-box-inner">
<video id="video" controls></video>
</div>
</div>
<a id="back">&lsaquo;</a> <a id="back">&lsaquo;</a>
<a id="next">&rsaquo;</a> <a id="next">&rsaquo;</a>

View File

@ -144,6 +144,9 @@
suffix = size.toString(); suffix = size.toString();
return "cache/" + PhotoFloat.cachePath(PhotoFloat.photoHash(album, photo) + "_" + suffix + ".jpg"); return "cache/" + PhotoFloat.cachePath(PhotoFloat.photoHash(album, photo) + "_" + suffix + ".jpg");
}; };
PhotoFloat.videoPath = function(album, video) {
	// Cached transcodes share the photo hash scheme, with a .webm suffix.
	var hash = PhotoFloat.photoHash(album, video);
	return "cache/" + PhotoFloat.cachePath(hash + ".webm");
};
PhotoFloat.originalPhotoPath = function(album, photo) { PhotoFloat.originalPhotoPath = function(album, photo) {
return "albums/" + album.path + "/" + photo.name; return "albums/" + album.path + "/" + photo.name;
}; };
@ -176,6 +179,7 @@
PhotoFloat.prototype.photoHash = PhotoFloat.photoHash; PhotoFloat.prototype.photoHash = PhotoFloat.photoHash;
PhotoFloat.prototype.albumHash = PhotoFloat.albumHash; PhotoFloat.prototype.albumHash = PhotoFloat.albumHash;
PhotoFloat.prototype.photoPath = PhotoFloat.photoPath; PhotoFloat.prototype.photoPath = PhotoFloat.photoPath;
PhotoFloat.prototype.videoPath = PhotoFloat.videoPath;
PhotoFloat.prototype.originalPhotoPath = PhotoFloat.originalPhotoPath; PhotoFloat.prototype.originalPhotoPath = PhotoFloat.originalPhotoPath;
PhotoFloat.prototype.trimExtension = PhotoFloat.trimExtension; PhotoFloat.prototype.trimExtension = PhotoFloat.trimExtension;
PhotoFloat.prototype.cleanHash = PhotoFloat.cleanHash; PhotoFloat.prototype.cleanHash = PhotoFloat.cleanHash;

View File

@ -145,6 +145,7 @@ $(document).ready(function() {
$("#album-view").removeClass("photo-view-container"); $("#album-view").removeClass("photo-view-container");
$("#subalbums").show(); $("#subalbums").show();
$("#photo-view").hide(); $("#photo-view").hide();
$("#video")[0].pause()
} }
setTimeout(scrollToThumb, 1); setTimeout(scrollToThumb, 1);
} }
@ -165,25 +166,43 @@ $(document).ready(function() {
image.css("height", "100%").css("width", "auto").css("position", "").css("bottom", ""); image.css("height", "100%").css("width", "auto").css("position", "").css("bottom", "");
} }
function showPhoto() { function showPhoto() {
var width, height, photoSrc, previousPhoto, nextPhoto, nextLink, text; var width, height, photoSrc, videoSrc, previousPhoto, nextPhoto, nextLink, text;
width = currentPhoto.size[0];
height = currentPhoto.size[1]; if (currentPhoto.mediaType == "video") {
if (width > height) { width = currentPhoto.size[0];
height = height / width * maxSize; height = currentPhoto.size[1];
width = maxSize; videoSrc = photoFloat.videoPath(currentAlbum, currentPhoto);
} else { $("#video")
width = width / height * maxSize; .attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
height = maxSize; .attr("src", videoSrc)
.attr("alt", currentPhoto.name);
$("#video-box-inner").css('height', height + 'px').css('margin-top', - height / 2);
$("#photo-box").hide();
$("#video-box").show();
}
else {
width = currentPhoto.size[0];
height = currentPhoto.size[1];
if (width > height) {
height = height / width * maxSize;
width = maxSize;
} else {
width = width / height * maxSize;
height = maxSize;
}
$(window).unbind("resize", scaleImage);
photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
$("#photo")
.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
.attr("src", photoSrc)
.attr("alt", currentPhoto.name)
.attr("title", currentPhoto.date)
.load(scaleImage);
$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
$("#video")[0].pause()
$("#video-box").hide();
$("#photo-box").show();
} }
$(window).unbind("resize", scaleImage);
photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
$("#photo")
.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
.attr("src", photoSrc)
.attr("alt", currentPhoto.name)
.attr("title", currentPhoto.date)
.load(scaleImage);
$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
previousPhoto = currentAlbum.photos[ previousPhoto = currentAlbum.photos[
(currentPhotoIndex - 1 < 0) ? (currentAlbum.photos.length - 1) : (currentPhotoIndex - 1) (currentPhotoIndex - 1 < 0) ? (currentAlbum.photos.length - 1) : (currentPhotoIndex - 1)