diff --git a/scanner/PhotoAlbum.py b/scanner/PhotoAlbum.py
index aeb3087..368b1d5 100644
--- a/scanner/PhotoAlbum.py
+++ b/scanner/PhotoAlbum.py
@@ -6,6 +6,8 @@ import os.path
 from PIL import Image
 from PIL.ExifTags import TAGS
 import gc
+import tempfile
+import subprocess
 
 class Album(object):
 	def __init__(self, path):
@@ -102,12 +104,13 @@ class Album(object):
 			if trim_base(path) == photo._path:
 				return photo
 		return None
-	
+
 class Photo(object):
 	thumb_sizes = [ (75, True), (150, True), (640, False), (800, False), (1024, False) ]
 	def __init__(self, path, thumb_path=None, attributes=None):
 		self._path = trim_base(path)
 		self.is_valid = True
+		image = None
 		try:
 			mtime = file_mtime(path)
 		except KeyboardInterrupt:
@@ -120,17 +123,26 @@ class Photo(object):
 			return
 
 		self._attributes = {}
 		self._attributes["dateTimeFile"] = mtime
+		self._attributes["mediaType"] = "photo"
 		try:
 			image = Image.open(path)
 		except KeyboardInterrupt:
 			raise
 		except:
+			self._video_metadata(path)
+
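+		# Dispatch on what probing found: a PIL-readable file is a photo,
+		# an ffprobe-readable file is a video, anything else is invalid.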
self._attributes["spectralSensitivity"] = exif["SpectralSensitivity"] - if "MeteringMode" in exif and exif["MeteringMode"] < len(self._metadata.metering_list): - self._attributes["meteringMode"] = self._metadata.metering_list[exif["MeteringMode"]] - if "SensingMethod" in exif and exif["SensingMethod"] < len(self._metadata.sensing_method_list): - self._attributes["sensingMethod"] = self._metadata.sensing_method_list[exif["SensingMethod"]] - if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._metadata.scene_capture_type_list): - self._attributes["sceneCaptureType"] = self._metadata.scene_capture_type_list[exif["SceneCaptureType"]] - if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._metadata.subject_distance_range_list): - self._attributes["subjectDistanceRange"] = self._metadata.subject_distance_range_list[exif["SubjectDistanceRange"]] + if "MeteringMode" in exif and exif["MeteringMode"] < len(self._photo_metadata.metering_list): + self._attributes["meteringMode"] = self._photo_metadata.metering_list[exif["MeteringMode"]] + if "SensingMethod" in exif and exif["SensingMethod"] < len(self._photo_metadata.sensing_method_list): + self._attributes["sensingMethod"] = self._photo_metadata.sensing_method_list[exif["SensingMethod"]] + if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._photo_metadata.scene_capture_type_list): + self._attributes["sceneCaptureType"] = self._photo_metadata.scene_capture_type_list[exif["SceneCaptureType"]] + if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._photo_metadata.subject_distance_range_list): + self._attributes["subjectDistanceRange"] = self._photo_metadata.subject_distance_range_list[exif["SubjectDistanceRange"]] if "ExposureCompensation" in exif: self._attributes["exposureCompensation"] = exif["ExposureCompensation"] if "ExposureBiasValue" in exif: @@ -215,15 +227,38 @@ class Photo(object): if "DateTime" in exif: self._attributes["dateTime"] = exif["DateTime"] - _metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"} - _metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"} - _metadata.metering_list = ["Unknown", 
"Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"] - _metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"] - _metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"] - _metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"] - _metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"] - _metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"] - + _photo_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"} + _photo_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"} + _photo_metadata.metering_list = ["Unknown", "Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"] + _photo_metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"] + _photo_metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"] + _photo_metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"] + _photo_metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"] + _photo_metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"] + + def _video_metadata(self, path, original=True): + try: + p = subprocess.check_output(['/usr/bin/ffprobe', '-show_format', 
+		try:
+			p = subprocess.check_output(['/usr/bin/ffprobe', '-show_format', '-show_streams', '-of', 'json', '-loglevel', '0', path])
+		except KeyboardInterrupt:
+			raise
+		except:
+			return
+		info = json.loads(p)
+		for s in info["streams"]:
+			if 'codec_type' in s and s['codec_type'] == 'video':
+				if original:
+					self._attributes["mediaType"] = "video"
+					self._attributes["originalSize"] = (int(s["width"]), int(s["height"]))
+					self._attributes["duration"] = s["duration"]
+					if "tags" in s:
+						# creation_time only in UTC!
+						# https://code.google.com/p/android/issues/detail?id=60225#c6
+						#self._attributes["dateTime"] = s["tags"]["creation_time"]
+						if "rotate" in s["tags"]:
+							self._attributes["rotate"] = s["tags"]["rotate"]
+				else:
+					self._attributes["size"] = (int(s["width"]), int(s["height"]))
+
 	def _thumbnail(self, image, thumb_path, original_path, size, square=False):
 		thumb_path = os.path.join(thumb_path, image_cache(self._path, size, square))
 		info_string = "%s -> %spx" % (os.path.basename(original_path), str(size))
@@ -268,7 +303,7 @@ class Photo(object):
 				message("save failure", os.path.basename(thumb_path))
 				os.unlink(thumb_path)
 
-	def _thumbnails(self, image, thumb_path, original_path):
+	def _photo_thumbnails(self, image, thumb_path, original_path):
 		mirror = image
 		if self._orientation == 2:
 			# Vertical Mirror
@@ -293,6 +328,73 @@
 				mirror = image.transpose(Image.ROTATE_90)
 		for size in Photo.thumb_sizes:
 			self._thumbnail(mirror, thumb_path, original_path, size[0], size[1])
+
+	def _video_thumbnails(self, thumb_path, original_path):
+		(tfd, tfn) = tempfile.mkstemp()
+		os.close(tfd)  # only the path is needed; ffmpeg reopens the file itself
+		try:
+			subprocess.check_call(['/usr/bin/ffmpeg', '-i', original_path, '-f', 'image2', '-vsync', '1', '-vframes', '1', '-an', '-loglevel', 'quiet', '-y', tfn])
+		except KeyboardInterrupt:
+			os.unlink(tfn)
+			raise
+		except:
+			message("couldn't extract video frame", os.path.basename(original_path))
+			os.unlink(tfn)
+			return
+		try:
+			image = Image.open(tfn)
+		except KeyboardInterrupt:
+			raise
+		except:
+			message("couldn't open video thumbnail", tfn)
+			os.unlink(tfn)
+			return
+		mirror = image
+		if "rotate" in self._attributes:
+			if self._attributes["rotate"] == "90":
+				mirror = image.transpose(Image.ROTATE_270)
+			elif self._attributes["rotate"] == "180":
+				mirror = image.transpose(Image.ROTATE_180)
+			elif self._attributes["rotate"] == "270":
+				mirror = image.transpose(Image.ROTATE_90)
+		for size in Photo.thumb_sizes:
+			if size[1]:
+				self._thumbnail(mirror, thumb_path, original_path, size[0], size[1])
+		os.unlink(tfn)
+
+	def _video_transcode(self, transcode_path, original_path):
+		transcode_path = os.path.join(transcode_path, cache_base(self._path) + '.webm')
+		transcode_cmd = ['/usr/bin/ffmpeg', '-i', original_path, '-c:v', 'libvpx', '-crf', '10', '-b:v', '800k', '-c:a', 'libvorbis', '-f', 'webm', '-threads', '2', '-loglevel', '0', '-y']
+		filters = []
+		info_string = "%s -> webm" % (os.path.basename(original_path))
+		message("transcoding", info_string)
+		if os.path.exists(transcode_path) and file_mtime(transcode_path) >= self._attributes["dateTimeFile"]:
+			return
+		if "originalSize" in self._attributes and self._attributes["originalSize"][1] > 720:
+			filters.append("scale=trunc(oh*a/2)*2:min(720\,iw)")
+		if "rotate" in self._attributes:
+			if self._attributes["rotate"] == "90":
+				filters.append('transpose=1')
+			elif self._attributes["rotate"] == "180":
+				filters.append('vflip,hflip')
+			elif self._attributes["rotate"] == "270":
+				filters.append('transpose=2')
+		if len(filters):
+			transcode_cmd.append('-vf')
+			transcode_cmd.append(','.join(filters))
+		transcode_cmd.append(transcode_path)
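+		# Run ffmpeg; if the transcode fails, remove any partial .webm so
+		# the next scanner run will retry it.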
+		try:
+			subprocess.check_call(transcode_cmd)
+		except KeyboardInterrupt:
+			raise
+		except:
+			message("transcoding failure", os.path.basename(original_path))
+			try:
+				os.unlink(transcode_path)
+			except:
+				pass
+			return
+		self._video_metadata(transcode_path, False)
+
 	@property
 	def name(self):
 		return os.path.basename(self._path)
@@ -303,7 +405,15 @@ class Photo(object):
 		return self._path
 	@property
 	def image_caches(self):
-		return [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes]
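+		# Videos publish only the square thumbnails plus the transcoded
+		# .webm; photos publish the full thumbnail set.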
+		caches = []
+		if "mediaType" in self._attributes and self._attributes["mediaType"] == "video":
+			for size in Photo.thumb_sizes:
+				if size[1]:
+					caches.append(image_cache(self._path, size[0], size[1]))
+			caches.append(cache_base(self._path) + '.webm')
+		else:
+			caches = [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes]
+		return caches
 	@property
 	def date(self):
 		if not self.is_valid:
diff --git a/web/css/000-controls.css b/web/css/000-controls.css
index 66fe6d6..1aee05b 100644
--- a/web/css/000-controls.css
+++ b/web/css/000-controls.css
@@ -91,9 +91,14 @@ a:hover {
 	right: 0;
 	text-align: center;
 }
-#photo-box {
+#photo-box, #video-box {
 	display: inline;
 }
+#video-box-inner {
+	position: absolute;
+	top: 50%;
+	width: 100%;
+}
 #photo-links {
 	background-color: #000000;
 	font-weight: bold;
diff --git a/web/index.html b/web/index.html
index 388609b..ab9b61d 100644
--- a/web/index.html
+++ b/web/index.html
@@ -21,6 +21,11 @@
+			<div id="video-box">
+				<div id="video-box-inner">
+					<video id="video" controls="controls"></video>
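+					<!-- src, width and height are filled in by showPhoto() in web/js/011-display.js -->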
+				</div>
+			</div>
diff --git a/web/js/010-libphotofloat.js b/web/js/010-libphotofloat.js
index 30cf889..a4ad3db 100644
--- a/web/js/010-libphotofloat.js
+++ b/web/js/010-libphotofloat.js
@@ -144,6 +144,9 @@
 			suffix = size.toString();
 		return "cache/" + PhotoFloat.cachePath(PhotoFloat.photoHash(album, photo) + "_" + suffix + ".jpg");
 	};
+	PhotoFloat.videoPath = function(album, video) {
+		return "cache/" + PhotoFloat.cachePath(PhotoFloat.photoHash(album, video) + ".webm");
+	};
 	PhotoFloat.originalPhotoPath = function(album, photo) {
 		return "albums/" + album.path + "/" + photo.name;
 	};
@@ -176,6 +179,7 @@
 	PhotoFloat.prototype.photoHash = PhotoFloat.photoHash;
 	PhotoFloat.prototype.albumHash = PhotoFloat.albumHash;
 	PhotoFloat.prototype.photoPath = PhotoFloat.photoPath;
+	PhotoFloat.prototype.videoPath = PhotoFloat.videoPath;
 	PhotoFloat.prototype.originalPhotoPath = PhotoFloat.originalPhotoPath;
 	PhotoFloat.prototype.trimExtension = PhotoFloat.trimExtension;
 	PhotoFloat.prototype.cleanHash = PhotoFloat.cleanHash;
diff --git a/web/js/011-display.js b/web/js/011-display.js
index f0ddd77..007eb32 100644
--- a/web/js/011-display.js
+++ b/web/js/011-display.js
@@ -145,6 +145,7 @@ $(document).ready(function() {
 			$("#album-view").removeClass("photo-view-container");
 			$("#subalbums").show();
 			$("#photo-view").hide();
+			$("#video")[0].pause();
 		}
 		setTimeout(scrollToThumb, 1);
 	}
@@ -165,25 +166,43 @@ $(document).ready(function() {
 			image.css("height", "100%").css("width", "auto").css("position", "").css("bottom", "");
 	}
 	function showPhoto() {
-		var width, height, photoSrc, previousPhoto, nextPhoto, nextLink, text;
-		width = currentPhoto.size[0];
-		height = currentPhoto.size[1];
-		if (width > height) {
-			height = height / width * maxSize;
-			width = maxSize;
-		} else {
-			width = width / height * maxSize;
-			height = maxSize;
+		var width, height, photoSrc, videoSrc, previousPhoto, nextPhoto, nextLink, text;
+
+		if (currentPhoto.mediaType == "video") {
+			width = currentPhoto.size[0];
+			height = currentPhoto.size[1];
+			videoSrc = photoFloat.videoPath(currentAlbum, currentPhoto);
+			$("#video")
+				.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
+				.attr("src", videoSrc)
+				.attr("alt", currentPhoto.name);
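+			// #video-box-inner sits at top: 50% (see 000-controls.css); pulling
+			// it up by half its height centers the player vertically.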
+			$("#video-box-inner").css('height', height + 'px').css('margin-top', - height / 2);
+			$("#photo-box").hide();
+			$("#video-box").show();
+		}
+		else {
+			width = currentPhoto.size[0];
+			height = currentPhoto.size[1];
+			if (width > height) {
+				height = height / width * maxSize;
+				width = maxSize;
+			} else {
+				width = width / height * maxSize;
+				height = maxSize;
+			}
+			$(window).unbind("resize", scaleImage);
+			photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
+			$("#photo")
+				.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
+				.attr("src", photoSrc)
+				.attr("alt", currentPhoto.name)
+				.attr("title", currentPhoto.date)
+				.load(scaleImage);
+			$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
+			$("#video")[0].pause();
+			$("#video-box").hide();
+			$("#photo-box").show();
 		}
-		$(window).unbind("resize", scaleImage);
-		photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
-		$("#photo")
-			.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
-			.attr("src", photoSrc)
-			.attr("alt", currentPhoto.name)
-			.attr("title", currentPhoto.date)
-			.load(scaleImage);
-		$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
 		previousPhoto = currentAlbum.photos[
 			(currentPhotoIndex - 1 < 0) ? (currentAlbum.photos.length - 1) : (currentPhotoIndex - 1)