2 Commits

21 changed files with 5987 additions and 8620 deletions


@@ -24,16 +24,14 @@ def trim_base_custom(path, base):
return path
def trim_base(path):
return trim_base_custom(path, trim_base.base)
def cache_base(path, filepath=False):
if len(path) == 0:
return "root"
elif filepath and len(path.split(os.sep)) < 2:
path = "root-" + path
def cache_base(path):
path = trim_base(path).replace('/', '-').replace(' ', '_').replace('(', '').replace('&', '').replace(',', '').replace(')', '').replace('#', '').replace('[', '').replace(']', '').replace('"', '').replace("'", '').replace('_-_', '-').lower()
while path.find("--") != -1:
path = path.replace("--", "-")
while path.find("__") != -1:
path = path.replace("__", "_")
if len(path) == 0:
path = "root"
return path
def json_cache(path):
return cache_base(path) + ".json"
@@ -42,8 +40,6 @@ def image_cache(path, size, square=False):
suffix = str(size) + "s"
else:
suffix = str(size)
return cache_base(path, True) + "_" + suffix + ".jpg"
def video_cache(path):
return cache_base(path, True) + ".mp4"
return cache_base(path) + "_" + suffix + ".jpg"
def file_mtime(path):
return datetime.fromtimestamp(int(os.path.getmtime(path)))
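The helpers above flatten an album path into a filesystem-safe slug: slashes become dashes, spaces become underscores, punctuation is stripped, doubled separators are collapsed, and an empty result falls back to "root". A minimal standalone sketch of the same idea (trim_base is omitted, so the input is assumed to be an already-trimmed path):

import re

def cache_base(path):
    # strip unsafe characters, collapse runs of separators, lower-case
    path = path.replace('/', '-').replace(' ', '_')
    path = re.sub(r"[()&,#\[\]\"']", '', path).replace('_-_', '-').lower()
    while '--' in path:
        path = path.replace('--', '-')
    while '__' in path:
        path = path.replace('__', '_')
    return path if path else 'root'

print(cache_base('Summer (2013)/Day 1'))  # summer_2013-day_1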


@@ -5,17 +5,7 @@ import os
import os.path
from PIL import Image
from PIL.ExifTags import TAGS
from multiprocessing import Pool
import gc
import tempfile
from VideoToolWrapper import *
def make_photo_thumbs(self, original_path, thumb_path, size):
# The pool methods use a queue.Queue to pass tasks to the worker processes.
# Everything that goes through the queue.Queue must be picklable, and since
# self._photo_thumbnail is not defined at the top level, it's not picklable.
# This is why we have this "dummy" function, so that it is picklable.
self._photo_thumbnail(original_path, thumb_path, size[0], size[1])
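The shim above exists because multiprocessing.Pool pickles every task it hands to a worker: the bound method self._photo_thumbnail cannot be pickled (on the Python 2 of this era), so a module-level function re-dispatches to the instance. A minimal sketch of the pattern, with illustrative names:

from multiprocessing import Pool

def run_task(obj, item):
    # defined at module top level, hence picklable
    return obj.process(item)

class Processor(object):
    def process(self, item):
        return item * 2

    def run_all(self, items):
        pool = Pool(processes=2)
        results = [pool.apply_async(run_task, (self, item)) for item in items]
        pool.close()
        pool.join()
        return [r.get() for r in results]

if __name__ == '__main__':
    print(Processor().run_all([1, 2, 3]))  # [2, 4, 6]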
class Album(object):
def __init__(self, path):
@@ -114,11 +104,10 @@ class Album(object):
return None
class Photo(object):
thumb_sizes = [ (75, True), (150, True), (640, False), (1024, False), (1600, False) ]
thumb_sizes = [ (75, True), (150, True), (640, False), (800, False), (1024, False) ]
def __init__(self, path, thumb_path=None, attributes=None):
self._path = trim_base(path)
self.is_valid = True
image = None
try:
mtime = file_mtime(path)
except KeyboardInterrupt:
@@ -131,26 +120,17 @@ class Photo(object):
return
self._attributes = {}
self._attributes["dateTimeFile"] = mtime
self._attributes["mediaType"] = "photo"
try:
image = Image.open(path)
except KeyboardInterrupt:
raise
except:
self._video_metadata(path)
if isinstance(image, Image.Image):
self._photo_metadata(image)
self._photo_thumbnails(path, thumb_path)
elif self._attributes["mediaType"] == "video":
self._video_thumbnails(thumb_path, path)
self._video_transcode(thumb_path, path)
else:
self.is_valid = False
return
def _photo_metadata(self, image):
self._metadata(image)
self._thumbnails(image, thumb_path, path)
def _metadata(self, image):
self._attributes["size"] = image.size
self._orientation = 1
try:
@@ -182,8 +162,8 @@ class Photo(object):
self._orientation = exif["Orientation"];
if self._orientation in range(5, 9):
self._attributes["size"] = (self._attributes["size"][1], self._attributes["size"][0])
if self._orientation - 1 < len(self._photo_metadata.orientation_list):
self._attributes["orientation"] = self._photo_metadata.orientation_list[self._orientation - 1]
if self._orientation - 1 < len(self._metadata.orientation_list):
self._attributes["orientation"] = self._metadata.orientation_list[self._orientation - 1]
if "Make" in exif:
self._attributes["make"] = exif["Make"]
if "Model" in exif:
@@ -202,133 +182,51 @@ class Photo(object):
self._attributes["iso"] = exif["PhotographicSensitivity"]
if "ExposureTime" in exif:
self._attributes["exposureTime"] = exif["ExposureTime"]
if "Flash" in exif and exif["Flash"] in self._photo_metadata.flash_dictionary:
if "Flash" in exif and exif["Flash"] in self._metadata.flash_dictionary:
try:
self._attributes["flash"] = self._photo_metadata.flash_dictionary[exif["Flash"]]
self._attributes["flash"] = self._metadata.flash_dictionary[exif["Flash"]]
except KeyboardInterrupt:
raise
except:
pass
if "LightSource" in exif and exif["LightSource"] in self._photo_metadata.light_source_dictionary:
if "LightSource" in exif and exif["LightSource"] in self._metadata.light_source_dictionary:
try:
self._attributes["lightSource"] = self._photo_metadata.light_source_dictionary[exif["LightSource"]]
self._attributes["lightSource"] = self._metadata.light_source_dictionary[exif["LightSource"]]
except KeyboardInterrupt:
raise
except:
pass
if "ExposureProgram" in exif and exif["ExposureProgram"] < len(self._photo_metadata.exposure_list):
self._attributes["exposureProgram"] = self._photo_metadata.exposure_list[exif["ExposureProgram"]]
if "ExposureProgram" in exif and exif["ExposureProgram"] < len(self._metadata.exposure_list):
self._attributes["exposureProgram"] = self._metadata.exposure_list[exif["ExposureProgram"]]
if "SpectralSensitivity" in exif:
self._attributes["spectralSensitivity"] = exif["SpectralSensitivity"]
if "MeteringMode" in exif and exif["MeteringMode"] < len(self._photo_metadata.metering_list):
self._attributes["meteringMode"] = self._photo_metadata.metering_list[exif["MeteringMode"]]
if "SensingMethod" in exif and exif["SensingMethod"] < len(self._photo_metadata.sensing_method_list):
self._attributes["sensingMethod"] = self._photo_metadata.sensing_method_list[exif["SensingMethod"]]
if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._photo_metadata.scene_capture_type_list):
self._attributes["sceneCaptureType"] = self._photo_metadata.scene_capture_type_list[exif["SceneCaptureType"]]
if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._photo_metadata.subject_distance_range_list):
self._attributes["subjectDistanceRange"] = self._photo_metadata.subject_distance_range_list[exif["SubjectDistanceRange"]]
if "MeteringMode" in exif and exif["MeteringMode"] < len(self._metadata.metering_list):
self._attributes["meteringMode"] = self._metadata.metering_list[exif["MeteringMode"]]
if "SensingMethod" in exif and exif["SensingMethod"] < len(self._metadata.sensing_method_list):
self._attributes["sensingMethod"] = self._metadata.sensing_method_list[exif["SensingMethod"]]
if "SceneCaptureType" in exif and exif["SceneCaptureType"] < len(self._metadata.scene_capture_type_list):
self._attributes["sceneCaptureType"] = self._metadata.scene_capture_type_list[exif["SceneCaptureType"]]
if "SubjectDistanceRange" in exif and exif["SubjectDistanceRange"] < len(self._metadata.subject_distance_range_list):
self._attributes["subjectDistanceRange"] = self._metadata.subject_distance_range_list[exif["SubjectDistanceRange"]]
if "ExposureCompensation" in exif:
self._attributes["exposureCompensation"] = exif["ExposureCompensation"]
if "ExposureBiasValue" in exif:
self._attributes["exposureCompensation"] = exif["ExposureBiasValue"]
if "DateTimeOriginal" in exif:
try:
self._attributes["dateTimeOriginal"] = datetime.strptime(exif["DateTimeOriginal"], '%Y:%m:%d %H:%M:%S')
except KeyboardInterrupt:
raise
except TypeError:
self._attributes["dateTimeOriginal"] = exif["DateTimeOriginal"]
self._attributes["dateTimeOriginal"] = exif["DateTimeOriginal"]
if "DateTime" in exif:
try:
self._attributes["dateTime"] = datetime.strptime(exif["DateTime"], '%Y:%m:%d %H:%M:%S')
except KeyboardInterrupt:
raise
except TypeError:
self._attributes["dateTime"] = exif["DateTime"]
self._attributes["dateTime"] = exif["DateTime"]
_photo_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"}
_photo_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"}
_photo_metadata.metering_list = ["Unknown", "Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"]
_photo_metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"]
_photo_metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"]
_photo_metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"]
_photo_metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"]
_photo_metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"]
_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"}
_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"}
_metadata.metering_list = ["Unknown", "Average", "Center-weighted average", "Spot", "Multi-spot", "Multi-segment", "Partial"]
_metadata.exposure_list = ["Not Defined", "Manual", "Program AE", "Aperture-priority AE", "Shutter speed priority AE", "Creative (Slow speed)", "Action (High speed)", "Portrait", "Landscape", "Bulb"]
_metadata.orientation_list = ["Horizontal (normal)", "Mirror horizontal", "Rotate 180", "Mirror vertical", "Mirror horizontal and rotate 270 CW", "Rotate 90 CW", "Mirror horizontal and rotate 90 CW", "Rotate 270 CW"]
_metadata.sensing_method_list = ["Not defined", "One-chip color area sensor", "Two-chip color area sensor", "Three-chip color area sensor", "Color sequential area sensor", "Trilinear sensor", "Color sequential linear sensor"]
_metadata.scene_capture_type_list = ["Standard", "Landscape", "Portrait", "Night scene"]
_metadata.subject_distance_range_list = ["Unknown", "Macro", "Close view", "Distant view"]
def _video_metadata(self, path, original=True):
p = VideoProbeWrapper().call('-show_format', '-show_streams', '-of', 'json', '-loglevel', '0', path)
if p == False:
self.is_valid = False
return
info = json.loads(p)
for s in info["streams"]:
if 'codec_type' in s and s['codec_type'] == 'video':
self._attributes["mediaType"] = "video"
self._attributes["size"] = (int(s["width"]), int(s["height"]))
if "duration" in s:
self._attributes["duration"] = s["duration"]
if "tags" in s and "rotate" in s["tags"]:
self._attributes["rotate"] = s["tags"]["rotate"]
if original:
self._attributes["originalSize"] = (int(s["width"]), int(s["height"]))
# we break, because a video can contain several streams
# this way we only get/use values from the first stream
break
# use time from EXIF (rather than file creation)
try:
info['format']['tags']['creation_time']
except KeyError:
pass
else:
# we have a creation time set in the metadata
# let's use that
try:
self._attributes["dateTimeVideo"] = datetime.strptime(info['format']['tags']['creation_time'], '%Y-%m-%d %H:%M:%S')
except KeyboardInterrupt:
raise
except TypeError:
pass
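_video_metadata shells out to avprobe/ffprobe (through VideoProbeWrapper, whose deleted source appears further down) and keeps only the first video stream from the JSON it gets back. A rough standalone equivalent, assuming an ffprobe binary on the PATH:

import json
import subprocess

def probe_video(path):
    out = subprocess.check_output(['ffprobe', '-show_format', '-show_streams',
                                   '-of', 'json', '-loglevel', '0', path])
    info = json.loads(out)
    for s in info['streams']:
        # a file can contain several streams; only use the first video stream
        if s.get('codec_type') == 'video':
            return {'size': (int(s['width']), int(s['height'])),
                    'duration': s.get('duration'),
                    'rotate': s.get('tags', {}).get('rotate')}
    return None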
def _photo_thumbnail(self, original_path, thumb_path, size, square=False):
try:
image = Image.open(original_path)
except KeyboardInterrupt:
raise
except:
self.is_valid = False
return
mirror = image
if self._orientation == 2:
# Vertical Mirror
mirror = image.transpose(Image.FLIP_LEFT_RIGHT)
elif self._orientation == 3:
# Rotation 180
mirror = image.transpose(Image.ROTATE_180)
elif self._orientation == 4:
# Horizontal Mirror
mirror = image.transpose(Image.FLIP_TOP_BOTTOM)
elif self._orientation == 5:
# Horizontal Mirror + Rotation 270
mirror = image.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_270)
elif self._orientation == 6:
# Rotation 270
mirror = image.transpose(Image.ROTATE_270)
elif self._orientation == 7:
# Vertical Mirror + Rotation 270
mirror = image.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270)
elif self._orientation == 8:
# Rotation 90
mirror = image.transpose(Image.ROTATE_90)
image = mirror
self._thumbnail(image, original_path, thumb_path, size, square)
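The orientation ladder above undoes the eight EXIF orientation cases with PIL transposes. The same mapping written as a lookup table, for reference:

from PIL import Image

# EXIF orientation value -> sequence of PIL transpose operations
ORIENTATION_OPS = {
    2: [Image.FLIP_LEFT_RIGHT],                    # vertical mirror
    3: [Image.ROTATE_180],                         # rotation 180
    4: [Image.FLIP_TOP_BOTTOM],                    # horizontal mirror
    5: [Image.FLIP_TOP_BOTTOM, Image.ROTATE_270],  # horizontal mirror + rotation 270
    6: [Image.ROTATE_270],                         # rotation 270
    7: [Image.FLIP_LEFT_RIGHT, Image.ROTATE_270],  # vertical mirror + rotation 270
    8: [Image.ROTATE_90],                          # rotation 90
}

def normalize_orientation(image, orientation):
    for op in ORIENTATION_OPS.get(orientation, []):
        image = image.transpose(op)
    return image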
def _thumbnail(self, image, original_path, thumb_path, size, square):
def _thumbnail(self, image, thumb_path, original_path, size, square=False):
thumb_path = os.path.join(thumb_path, image_cache(self._path, size, square))
info_string = "%s -> %spx" % (os.path.basename(original_path), str(size))
if square:
@@ -348,7 +246,6 @@ class Photo(object):
raise
except:
message("corrupt image", os.path.basename(original_path))
self.is_valid = False
return
if square:
if image.size[0] > image.size[1]:
@@ -379,148 +276,31 @@ class Photo(object):
except:
pass
def _photo_thumbnails(self, original_path, thumb_path):
# get number of cores on the system, and use all minus one
num_of_cores = os.sysconf('SC_NPROCESSORS_ONLN') - 1
pool = Pool(processes=num_of_cores)
try:
for size in Photo.thumb_sizes:
pool.apply_async(make_photo_thumbs, args = (self, original_path, thumb_path, size))
except:
pool.terminate()
pool.close()
pool.join()
def _video_thumbnails(self, thumb_path, original_path):
(tfd, tfn) = tempfile.mkstemp();
p = VideoTranscodeWrapper().call(
'-i', original_path, # original file to extract thumbs from
'-f', 'image2', # extract image
'-vsync', '1', # constant frame rate
'-vframes', '1', # extract a single frame
'-an', # disable audio
'-loglevel', 'quiet', # don't display anything
'-y', # don't prompt for overwrite
tfn # temporary file to store extracted image
)
if p == False:
message("couldn't extract video frame", os.path.basename(original_path))
try:
os.unlink(tfn)
except:
pass
self.is_valid = False
return
try:
image = Image.open(tfn)
except KeyboardInterrupt:
try:
os.unlink(tfn)
except:
pass
raise
except:
message("couldn't open video thumbnail", tfn)
try:
os.unlink(tfn)
except:
pass
self.is_valid = False
return
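The deleted poster-frame extraction drives avconv/ffmpeg through the wrapper, writes a single frame to a temporary file, and opens it with PIL. A standalone sketch of the same invocation, assuming ffmpeg is installed:

import os
import subprocess
import tempfile

def extract_poster_frame(video_path):
    fd, tmp = tempfile.mkstemp()
    os.close(fd)
    # -f image2 with -vframes 1 grabs a single frame; -y overwrites silently
    rc = subprocess.call(['ffmpeg', '-i', video_path, '-f', 'image2',
                          '-vsync', '1', '-vframes', '1', '-an',
                          '-loglevel', 'quiet', '-y', tmp])
    if rc != 0:
        os.unlink(tmp)
        return None
    return tmp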
def _thumbnails(self, image, thumb_path, original_path):
mirror = image
if "rotate" in self._attributes:
if self._attributes["rotate"] == "90":
mirror = image.transpose(Image.ROTATE_270)
elif self._attributes["rotate"] == "180":
mirror = image.transpose(Image.ROTATE_180)
elif self._attributes["rotate"] == "270":
mirror = image.transpose(Image.ROTATE_90)
if self._orientation == 2:
# Vertical Mirror
mirror = image.transpose(Image.FLIP_LEFT_RIGHT)
elif self._orientation == 3:
# Rotation 180
mirror = image.transpose(Image.ROTATE_180)
elif self._orientation == 4:
# Horizontal Mirror
mirror = image.transpose(Image.FLIP_TOP_BOTTOM)
elif self._orientation == 5:
# Horizontal Mirror + Rotation 270
mirror = image.transpose(Image.FLIP_TOP_BOTTOM).transpose(Image.ROTATE_270)
elif self._orientation == 6:
# Rotation 270
mirror = image.transpose(Image.ROTATE_270)
elif self._orientation == 7:
# Vertical Mirror + Rotation 270
mirror = image.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270)
elif self._orientation == 8:
# Rotation 90
mirror = image.transpose(Image.ROTATE_90)
for size in Photo.thumb_sizes:
if size[1]:
self._thumbnail(mirror, original_path, thumb_path, size[0], size[1])
try:
os.unlink(tfn)
except:
pass
def _video_transcode(self, transcode_path, original_path):
transcode_path = os.path.join(transcode_path, video_cache(self._path))
# get number of cores on the system, and use all minus one
num_of_cores = os.sysconf('SC_NPROCESSORS_ONLN') - 1
transcode_cmd = [
'-i', original_path, # original file to be encoded
'-c:v', 'libx264', # set h264 as videocodec
'-preset', 'slow', # set specific preset that provides a certain encoding speed to compression ratio
'-profile:v', 'baseline', # set output to specific h264 profile
'-level', '3.0', # sets highest compatibility with target devices
'-crf', '20', # set quality
'-b:v', '4M', # set videobitrate to 4Mbps
'-strict', 'experimental', # allow native aac codec below
'-c:a', 'aac', # set aac as audiocodec
'-ac', '2', # force two audiochannels
'-ab', '160k', # set audiobitrate to 160Kbps
'-maxrate', '10000000', # limits max rate, will degrade CRF if needed
'-bufsize', '10000000', # define how much the client should buffer
'-f', 'mp4', # fileformat mp4
'-threads', str(num_of_cores), # number of cores (all minus one)
'-loglevel', 'quiet', # don't display anything
'-y' # don't prompt for overwrite
]
filters = []
info_string = "%s -> mp4, h264" % (os.path.basename(original_path))
message("transcoding", info_string)
if os.path.exists(transcode_path) and file_mtime(transcode_path) >= self._attributes["dateTimeFile"]:
self._video_metadata(transcode_path, False)
return
if "originalSize" in self._attributes:
width = self._attributes["originalSize"][0]
height = self._attributes["originalSize"][1]
if width > height:
# horizontal orientation
if height > 720:
transcode_cmd.append('-vf')
transcode_cmd.append('scale=-1:720')
elif (height > width) or (width == height):
# vertical orientation, or equal sides
if width > 720:
transcode_cmd.append('-vf')
transcode_cmd.append('scale=720:-1')
if "rotate" in self._attributes:
if self._attributes["rotate"] == "90":
filters.append('transpose=1')
elif self._attributes["rotate"] == "180":
filters.append('vflip,hflip')
elif self._attributes["rotate"] == "270":
filters.append('transpose=2')
if len(filters):
transcode_cmd.append('-vf')
transcode_cmd.append(','.join(filters))
tmp_transcode_cmd = transcode_cmd[:]
transcode_cmd.append(transcode_path)
p = VideoTranscodeWrapper().call(*transcode_cmd)
if p == False:
# add another option, try transcoding again
# done to avoid this error:
# x264 [error]: baseline profile doesn't support 4:2:2
message("transcoding failure, trying yuv420p", os.path.basename(original_path))
tmp_transcode_cmd.append('-pix_fmt')
tmp_transcode_cmd.append('yuv420p')
tmp_transcode_cmd.append(transcode_path)
p = VideoTranscodeWrapper().call(*tmp_transcode_cmd)
if p == False:
message("transcoding failure", os.path.basename(original_path))
try:
os.unlink(transcode_path)
except:
pass
self.is_valid = False
return
self._video_metadata(transcode_path, False)
self._thumbnail(mirror, thumb_path, original_path, size[0], size[1])
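The deleted _video_transcode builds its -vf arguments in two steps: a scale filter that caps the shorter edge of the output at 720 pixels, then transpose/flip filters derived from the container's rotate tag. The selection logic in compact form (a sketch; the original passes the scale filter and the rotation filters as two separate -vf options, merged into a single chain here):

def build_video_filters(width, height, rotate=None, cap=720):
    filters = []
    if width > height and height > cap:
        # landscape: cap the height, let ffmpeg derive the width
        filters.append('scale=-1:720')
    elif height >= width and width > cap:
        # portrait or square: cap the width instead
        filters.append('scale=720:-1')
    if rotate == '90':
        filters.append('transpose=1')
    elif rotate == '180':
        filters.append('vflip,hflip')
    elif rotate == '270':
        filters.append('transpose=2')
    return ','.join(filters)

print(build_video_filters(1080, 1920, rotate='90'))  # scale=720:-1,transpose=1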
@property
def name(self):
return os.path.basename(self._path)
@@ -531,23 +311,13 @@ class Photo(object):
return self._path
@property
def image_caches(self):
caches = []
if "mediaType" in self._attributes and self._attributes["mediaType"] == "video":
for size in Photo.thumb_sizes:
if size[1]:
caches.append(image_cache(self._path, size[0], size[1]))
caches.append(video_cache(self._path))
else:
caches = [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes]
return caches
return [image_cache(self._path, size[0], size[1]) for size in Photo.thumb_sizes]
@property
def date(self):
correct_date = None;
if not self.is_valid:
correct_date = datetime(1900, 1, 1)
if "dateTimeVideo" in self._attributes:
correct_date = self._attributes["dateTimeVideo"]
elif "dateTimeOriginal" in self._attributes:
if "dateTimeOriginal" in self._attributes:
correct_date = self._attributes["dateTimeOriginal"]
elif "dateTime" in self._attributes:
correct_date = self._attributes["dateTime"]
@@ -557,11 +327,9 @@ class Photo(object):
def __cmp__(self, other):
date_compare = cmp(self.date, other.date)
if date_compare == 0:
return cmp(self.name, other.name)
return date_compare
@property
def attributes(self):
return self._attributes


@@ -68,25 +68,9 @@ class TreeWalker:
if cached_album:
cached_photo = cached_album.photo_from_path(entry)
if cached_photo and file_mtime(entry) <= cached_photo.attributes["dateTimeFile"]:
cache_file = None
if "mediaType" in cached_photo.attributes:
if cached_photo.attributes["mediaType"] == "video":
# if video
cache_file = os.path.join(self.cache_path, video_cache(entry))
else:
# if image
cache_file = os.path.join(self.cache_path, image_cache(entry, 1024, False))
else:
# if image
cache_file = os.path.join(self.cache_path, image_cache(entry, 1024, False))
# at this point we have full path to cache image/video
# check if it actually exists
if os.path.exists(cache_file):
message("cache hit", os.path.basename(entry))
cache_hit = True
photo = cached_photo
message("cache hit", os.path.basename(entry))
cache_hit = True
photo = cached_photo
if not cache_hit:
message("metainfo", os.path.basename(entry))
photo = Photo(entry, self.cache_path)
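The simplified logic above declares a cache hit whenever the file's modification time is not newer than the dateTimeFile stored in the cached metadata; the old per-media-type check that the cached thumbnail or video actually exists on disk is gone. The freshness test itself, as a small sketch:

import os
from datetime import datetime

def is_cache_fresh(entry_path, cached_attributes):
    # fresh when the file has not been modified since the cache was written
    mtime = datetime.fromtimestamp(int(os.path.getmtime(entry_path)))
    return mtime <= cached_attributes['dateTimeFile']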


@@ -1,47 +0,0 @@
from CachePath import message
import os
import subprocess
class VideoToolWrapper(object):
def call(self, *args):
path = args[-1]
for tool in self.wrappers:
try:
if self.check_output:
p = subprocess.check_output((tool,) + args)
else:
p = subprocess.call((tool,) + args)
if p > 0:
return False
else:
return "SUCCESS"
except KeyboardInterrupt:
if self.cleanup:
self.remove(path)
raise
except OSError:
continue
except:
if self.cleanup:
self.remove(path)
continue
return p
return False
def remove(self, path):
try:
os.unlink(path)
except:
pass
class VideoTranscodeWrapper(VideoToolWrapper):
def __init__(self):
self.wrappers = ['avconv', 'ffmpeg']
self.check_output = False
self.cleanup = True
class VideoProbeWrapper(VideoToolWrapper):
def __init__(self):
self.wrappers = ['avprobe', 'ffprobe']
self.check_output = True
self.cleanup = False
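The wrapper walks its tool list in order, treating a missing binary (OSError) as the cue to try the next one; VideoProbeWrapper returns the captured stdout while VideoTranscodeWrapper only reports success, and a wrapper with cleanup enabled unlinks its output path on interruption or failure. A hedged usage sketch with illustrative file names, mirroring the call sites above:

# probe: returns avprobe/ffprobe stdout (JSON here), or False on failure
out = VideoProbeWrapper().call('-show_format', '-show_streams',
                               '-of', 'json', '-loglevel', '0', 'clip.mov')

# transcode: the last positional argument is the output path, which the
# wrapper unlinks if the tool is interrupted or fails
ok = VideoTranscodeWrapper().call('-i', 'clip.mov', '-c:v', 'libx264',
                                  '-y', 'clip.mp4')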


@@ -1,19 +0,0 @@
AddOutputFilterByType DEFLATE text/text text/html text/plain text/xml text/css application/x-javascript application/javascript application/json
<FilesMatch "\.(jpg|otf|ico)$">
Header set Cache-Control "max-age=29030400, public"
</FilesMatch>
<FilesMatch "\.(css|js)$">
Header set Cache-Control "max-age=5184000, public"
</FilesMatch>
<FilesMatch "index.html">
Header set Cache-Control "max-age=2678400, public"
</FilesMatch>
<FilesMatch "\.json$">
Header set Cache-Control "max-age=3600, public"
</FilesMatch>
<FilesMatch "Makefile">
deny from all
</FilesMatch>

web/Makefile (new file, 69 lines)

@@ -0,0 +1,69 @@
JS_DIR = js
CSS_DIR = css
JS_MIN = $(JS_DIR)/scripts.min.js
CSS_MIN = $(CSS_DIR)/styles.min.css
JS_MIN_FILES := $(sort $(patsubst %.js, %.min.js, $(filter-out %.min.js, $(wildcard $(JS_DIR)/*.js))))
CSS_MIN_FILES := $(sort $(patsubst %.css, %.min.css, $(filter-out %.min.css, $(wildcard $(CSS_DIR)/*.css))))
JS_COMPILER := java -jar bin/closure-compiler.jar --warning_level QUIET
CSS_COMPILER := java -jar bin/yui-compressor.jar --type css
DEBUG ?= 0
.PHONY: all deploy clean
all: $(JS_MIN) $(CSS_MIN)
ifeq ($(DEBUG),0)
%.min.js: %.js
@echo " JS " $@
@$(JS_COMPILER) --js $< --js_output_file $@
else
%.min.js: %.js
@echo " JS " $@
@cat $< > $@
endif
%.min.css: %.css
@echo " CSS " $@
@$(CSS_COMPILER) -o $@ $<
$(JS_MIN): $(JS_MIN_FILES)
@echo " CAT " $@
@cat $^ > $@
$(CSS_MIN): $(CSS_MIN_FILES)
@echo " CAT " $@
@cat $^ > $@
clean:
@echo " RM " $(JS_MIN) $(JS_MIN_FILES) $(CSS_MIN) $(CSS_MIN_FILES)
@rm -fv $(JS_MIN) $(JS_MIN_FILES) $(CSS_MIN) $(CSS_MIN_FILES)
include ../deployment-config.mk
SSH_OPTS := -q -o ControlMaster=auto -o ControlPath=.ssh-deployment.sock
deploy: all
@echo " SSH $(WEB_SERVER)"
@ssh $(SSH_OPTS) -Nf $(WEB_SERVER)
@echo " RSYNC . $(WEB_SERVER):$(HTDOCS_PATH)"
@ssh -t $(SSH_OPTS) $(WEB_SERVER) "sudo -u $(HTDOCS_USER) -v"
@rsync -aizm --delete-excluded --exclude=.ssh-deployment.sock --exclude=Makefile --exclude=*.swp \
--exclude=bin/ --include=scripts.min.js --include=styles.min.css \
--exclude=*.js --exclude=*.css --rsh="ssh $(SSH_OPTS)" \
--rsync-path="sudo -n -u $(HTDOCS_USER) rsync" \
. "$(WEB_SERVER):$(HTDOCS_PATH)"
@echo " CHOWN $(HTDOCS_USER):$(HTDOCS_USER) $(WEB_SERVER):$(HTDOCS_PATH)"
@ssh -t $(SSH_OPTS) $(WEB_SERVER) "sudo chown -R $(HTDOCS_USER):$(HTDOCS_USER) '$(HTDOCS_PATH)'"
@echo " CHMOD 750/640 $(WEB_SERVER):$(HTDOCS_PATH)"
@ssh -t $(SSH_OPTS) $(WEB_SERVER) "sudo find '$(HTDOCS_PATH)' -type f -exec chmod 640 {} \;; \
sudo find '$(HTDOCS_PATH)' -type d -exec chmod 750 {} \;;"
@echo " SSH $(WEB_SERVER)"
@ssh -O exit $(SSH_OPTS) $(WEB_SERVER)
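Typical usage, assuming ../deployment-config.mk defines WEB_SERVER, HTDOCS_PATH and HTDOCS_USER: a plain make builds scripts.min.js and styles.min.css, make DEBUG=1 skips the Closure Compiler and merely concatenates the JavaScript, make deploy rsyncs the built tree to the web server over a multiplexed SSH connection and then fixes ownership and permissions, and make clean removes the generated files.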

New binary file (not shown) — presumably web/bin/closure-compiler.jar, the jar invoked by the Makefile above.

web/bin/yui-compressor.jar (new binary file, not shown)


@@ -1,3 +0,0 @@
<FilesMatch "(?<!min)\.css">
deny from all
</FilesMatch>


@@ -91,14 +91,9 @@ a:hover {
right: 0;
text-align: center;
}
#photo-box, #video-box {
#photo-box {
display: inline;
}
#video-box-inner {
position: absolute;
top: 50%;
width: 100%;
}
#photo-links {
background-color: #000000;
font-weight: bold;
@@ -199,14 +194,6 @@ a:hover {
font-weight: bold;
font-style: italic;
}
#video-unsupported {
background-image: url(../img/video-unsupported.png);
background-position: top center;
background-repeat: no-repeat;
padding-top: 96px;
}
#auth-text input {
color: rgb(0, 0, 0);
background-color: rgb(200, 200, 200);


@@ -1,12 +0,0 @@
#!/bin/bash
# minify all .css-files
ls -1 *.css|grep -Ev "min.css$" | while read cssfile; do
newfile="${cssfile%.*}.min.css"
echo "$cssfile --> $newfile"
curl -X POST -s --data-urlencode "input@$cssfile" http://cssminifier.com/raw > $newfile
done
# merge all into one single file
rm -f styles.min.css
cat *.min.css > styles.min.css

Two binary image files deleted (not shown; 1.1 KiB and 5.0 KiB) — presumably web/img/video-icon.png and web/img/video-unsupported.png, which the removed CSS and JavaScript referenced.


@@ -20,10 +20,6 @@
<div id="metadata"></div>
</div>
</div>
<div id="video-box">
<div id="video-box-inner">
</div>
</div>
<a id="back">&lsaquo;</a>
<a id="next">&rsaquo;</a>


@@ -1,3 +0,0 @@
<FilesMatch "(?<!min)\.js">
deny from all
</FilesMatch>

File diff suppressed because it is too large.


@@ -1,221 +1,78 @@
/*!
* jQuery Mousewheel 3.1.12
/*! Copyright (c) 2010 Brandon Aaron (http://brandonaaron.net)
* Licensed under the MIT License (LICENSE.txt).
*
* Copyright 2014 jQuery Foundation and other contributors
* Released under the MIT license.
* http://jquery.org/license
* Thanks to: http://adomas.org/javascript-mouse-wheel/ for some pointers.
* Thanks to: Mathias Bank(http://www.mathias-bank.de) for a scope bug fix.
* Thanks to: Seamus Leahy for adding deltaX and deltaY
*
* Version: 3.0.4
*
* Requires: 1.2.2+
*/
(function (factory) {
if ( typeof define === 'function' && define.amd ) {
// AMD. Register as an anonymous module.
define(['jquery'], factory);
} else if (typeof exports === 'object') {
// Node/CommonJS style for Browserify
module.exports = factory;
} else {
// Browser globals
factory(jQuery);
}
}(function ($) {
(function($) {
var toFix = ['wheel', 'mousewheel', 'DOMMouseScroll', 'MozMousePixelScroll'],
toBind = ( 'onwheel' in document || document.documentMode >= 9 ) ?
['wheel'] : ['mousewheel', 'DomMouseScroll', 'MozMousePixelScroll'],
slice = Array.prototype.slice,
nullLowestDeltaTimeout, lowestDelta;
var types = ['DOMMouseScroll', 'mousewheel'];
if ( $.event.fixHooks ) {
for ( var i = toFix.length; i; ) {
$.event.fixHooks[ toFix[--i] ] = $.event.mouseHooks;
}
}
var special = $.event.special.mousewheel = {
version: '3.1.12',
setup: function() {
if ( this.addEventListener ) {
for ( var i = toBind.length; i; ) {
this.addEventListener( toBind[--i], handler, false );
}
} else {
this.onmousewheel = handler;
$.event.special.mousewheel = {
setup: function() {
if ( this.addEventListener ) {
for ( var i=types.length; i; ) {
this.addEventListener( types[--i], handler, false );
}
// Store the line height and page height for this particular element
$.data(this, 'mousewheel-line-height', special.getLineHeight(this));
$.data(this, 'mousewheel-page-height', special.getPageHeight(this));
},
} else {
this.onmousewheel = handler;
}
},
teardown: function() {
if ( this.removeEventListener ) {
for ( var i = toBind.length; i; ) {
this.removeEventListener( toBind[--i], handler, false );
}
} else {
this.onmousewheel = null;
teardown: function() {
if ( this.removeEventListener ) {
for ( var i=types.length; i; ) {
this.removeEventListener( types[--i], handler, false );
}
// Clean up the data we added to the element
$.removeData(this, 'mousewheel-line-height');
$.removeData(this, 'mousewheel-page-height');
},
getLineHeight: function(elem) {
var $elem = $(elem),
$parent = $elem['offsetParent' in $.fn ? 'offsetParent' : 'parent']();
if (!$parent.length) {
$parent = $('body');
}
return parseInt($parent.css('fontSize'), 10) || parseInt($elem.css('fontSize'), 10) || 16;
},
getPageHeight: function(elem) {
return $(elem).height();
},
settings: {
adjustOldDeltas: true, // see shouldAdjustOldDeltas() below
normalizeOffset: true // calls getBoundingClientRect for each event
} else {
this.onmousewheel = null;
}
};
}
};
$.fn.extend({
mousewheel: function(fn) {
return fn ? this.bind('mousewheel', fn) : this.trigger('mousewheel');
},
$.fn.extend({
mousewheel: function(fn) {
return fn ? this.bind("mousewheel", fn) : this.trigger("mousewheel");
},
unmousewheel: function(fn) {
return this.unbind('mousewheel', fn);
}
});
unmousewheel: function(fn) {
return this.unbind("mousewheel", fn);
}
});
function handler(event) {
var orgEvent = event || window.event,
args = slice.call(arguments, 1),
delta = 0,
deltaX = 0,
deltaY = 0,
absDelta = 0,
offsetX = 0,
offsetY = 0;
event = $.event.fix(orgEvent);
event.type = 'mousewheel';
function handler(event) {
var orgEvent = event || window.event, args = [].slice.call( arguments, 1 ), delta = 0, returnValue = true, deltaX = 0, deltaY = 0;
event = $.event.fix(orgEvent);
event.type = "mousewheel";
// Old school scrollwheel delta
if ( 'detail' in orgEvent ) { deltaY = orgEvent.detail * -1; }
if ( 'wheelDelta' in orgEvent ) { deltaY = orgEvent.wheelDelta; }
if ( 'wheelDeltaY' in orgEvent ) { deltaY = orgEvent.wheelDeltaY; }
if ( 'wheelDeltaX' in orgEvent ) { deltaX = orgEvent.wheelDeltaX * -1; }
// Old school scrollwheel delta
if ( event.wheelDelta ) { delta = event.wheelDelta/120; }
if ( event.detail ) { delta = -event.detail/3; }
// Firefox < 17 horizontal scrolling related to DOMMouseScroll event
if ( 'axis' in orgEvent && orgEvent.axis === orgEvent.HORIZONTAL_AXIS ) {
deltaX = deltaY * -1;
deltaY = 0;
}
// New school multidimensional scroll (touchpads) deltas
deltaY = delta;
// Set delta to be deltaY or deltaX if deltaY is 0 for backwards compatibility
delta = deltaY === 0 ? deltaX : deltaY;
// New school wheel delta (wheel event)
if ( 'deltaY' in orgEvent ) {
deltaY = orgEvent.deltaY * -1;
delta = deltaY;
}
if ( 'deltaX' in orgEvent ) {
deltaX = orgEvent.deltaX;
if ( deltaY === 0 ) { delta = deltaX * -1; }
}
// No change actually happened, no reason to go any further
if ( deltaY === 0 && deltaX === 0 ) { return; }
// Need to convert lines and pages to pixels if we aren't already in pixels
// There are three delta modes:
// * deltaMode 0 is by pixels, nothing to do
// * deltaMode 1 is by lines
// * deltaMode 2 is by pages
if ( orgEvent.deltaMode === 1 ) {
var lineHeight = $.data(this, 'mousewheel-line-height');
delta *= lineHeight;
deltaY *= lineHeight;
deltaX *= lineHeight;
} else if ( orgEvent.deltaMode === 2 ) {
var pageHeight = $.data(this, 'mousewheel-page-height');
delta *= pageHeight;
deltaY *= pageHeight;
deltaX *= pageHeight;
}
// Store lowest absolute delta to normalize the delta values
absDelta = Math.max( Math.abs(deltaY), Math.abs(deltaX) );
if ( !lowestDelta || absDelta < lowestDelta ) {
lowestDelta = absDelta;
// Adjust older deltas if necessary
if ( shouldAdjustOldDeltas(orgEvent, absDelta) ) {
lowestDelta /= 40;
}
}
// Adjust older deltas if necessary
if ( shouldAdjustOldDeltas(orgEvent, absDelta) ) {
// Divide all the things by 40!
delta /= 40;
deltaX /= 40;
deltaY /= 40;
}
// Get a whole, normalized value for the deltas
delta = Math[ delta >= 1 ? 'floor' : 'ceil' ](delta / lowestDelta);
deltaX = Math[ deltaX >= 1 ? 'floor' : 'ceil' ](deltaX / lowestDelta);
deltaY = Math[ deltaY >= 1 ? 'floor' : 'ceil' ](deltaY / lowestDelta);
// Normalise offsetX and offsetY properties
if ( special.settings.normalizeOffset && this.getBoundingClientRect ) {
var boundingRect = this.getBoundingClientRect();
offsetX = event.clientX - boundingRect.left;
offsetY = event.clientY - boundingRect.top;
}
// Add information to the event object
event.deltaX = deltaX;
event.deltaY = deltaY;
event.deltaFactor = lowestDelta;
event.offsetX = offsetX;
event.offsetY = offsetY;
// Go ahead and set deltaMode to 0 since we converted to pixels
// Although this is a little odd since we overwrite the deltaX/Y
// properties with normalized deltas.
event.deltaMode = 0;
// Add event and delta to the front of the arguments
args.unshift(event, delta, deltaX, deltaY);
// Clear out lowestDelta after some time to better
// handle multiple device types that give
// different lowestDelta values
// Ex: trackpad = 3 and mouse wheel = 120
if (nullLowestDeltaTimeout) { clearTimeout(nullLowestDeltaTimeout); }
nullLowestDeltaTimeout = setTimeout(nullLowestDelta, 200);
return ($.event.dispatch || $.event.handle).apply(this, args);
// Gecko
if ( orgEvent.axis !== undefined && orgEvent.axis === orgEvent.HORIZONTAL_AXIS ) {
deltaY = 0;
deltaX = -1*delta;
}
function nullLowestDelta() {
lowestDelta = null;
}
// Webkit
if ( orgEvent.wheelDeltaY !== undefined ) { deltaY = orgEvent.wheelDeltaY/120; }
if ( orgEvent.wheelDeltaX !== undefined ) { deltaX = -1*orgEvent.wheelDeltaX/120; }
function shouldAdjustOldDeltas(orgEvent, absDelta) {
// If this is an older event and the delta is divisible by 120,
// then we are assuming that the browser is treating this as an
// older mouse wheel event and that we should divide the deltas
// by 40 to try and get a more usable deltaFactor.
// Side note, this actually impacts the reported scroll distance
// in older browsers and can cause scrolling to be slower than native.
// Turn this off by setting $.event.special.mousewheel.settings.adjustOldDeltas to false.
return special.settings.adjustOldDeltas && orgEvent.type === 'mousewheel' && absDelta % 120 === 0;
}
// Add event and delta to the front of the arguments
args.unshift(event, delta, deltaX, deltaY);
}));
return $.event.handle.apply(this, args);
}
})(jQuery);

File diff suppressed because it is too large.


@@ -147,9 +147,6 @@
hash = hash.substring(5);
return "cache/" + hash;
};
PhotoFloat.videoPath = function(album, video) {
return "cache/" + PhotoFloat.cachePath(PhotoFloat.photoHash(album, video) + ".mp4");
};
PhotoFloat.originalPhotoPath = function(album, photo) {
return "albums/" + album.path + "/" + photo.name;
};
@@ -182,7 +179,6 @@
PhotoFloat.prototype.photoHash = PhotoFloat.photoHash;
PhotoFloat.prototype.albumHash = PhotoFloat.albumHash;
PhotoFloat.prototype.photoPath = PhotoFloat.photoPath;
PhotoFloat.prototype.videoPath = PhotoFloat.videoPath;
PhotoFloat.prototype.originalPhotoPath = PhotoFloat.originalPhotoPath;
PhotoFloat.prototype.trimExtension = PhotoFloat.trimExtension;
PhotoFloat.prototype.cleanHash = PhotoFloat.cleanHash;


@@ -27,7 +27,7 @@ $(document).ready(function() {
var previousPhoto = null;
var originalTitle = document.title;
var photoFloat = new PhotoFloat();
var maxSize = 1024;
var maxSize = 800;
/* Displays */
@@ -98,8 +98,6 @@ $(document).ready(function() {
for (i = 0; i < currentAlbum.photos.length; ++i) {
link = $("<a href=\"#!/" + photoFloat.photoHash(currentAlbum, currentAlbum.photos[i]) + "\"></a>");
image = $("<img title=\"" + photoFloat.trimExtension(currentAlbum.photos[i].name) + "\" alt=\"" + photoFloat.trimExtension(currentAlbum.photos[i].name) + "\" src=\"" + photoFloat.photoPath(currentAlbum, currentAlbum.photos[i], 150, true) + "\" height=\"150\" width=\"150\" />");
if (currentAlbum.photos[i].mediaType == "video")
image.css("background-image", "url(" + image.attr("src") + ")").attr("src", "img/video-icon.png");
image.get(0).photo = currentAlbum.photos[i];
link.append(image);
photos.push(link);
@@ -147,8 +145,6 @@ $(document).ready(function() {
$("#album-view").removeClass("photo-view-container");
$("#subalbums").show();
$("#photo-view").hide();
$("#video-box-inner").empty();
$("#video-box").hide();
}
setTimeout(scrollToThumb, 1);
}
@@ -168,69 +164,26 @@ $(document).ready(function() {
else if (image.css("height") !== "100%")
image.css("height", "100%").css("width", "auto").css("position", "").css("bottom", "");
}
function scaleVideo() {
var video, container;
video = $("#video");
if (video.get(0) === this)
$(window).bind("resize", scaleVideo);
container = $("#photo-view");
if (video.attr("width") > container.width() && container.height() * video.attr("ratio") > container.width())
video.css("width", container.width()).css("height", container.width() / video.attr("ratio")).parent().css("height", container.width() / video.attr("ratio")).css("margin-top", - container.width() / video.attr("ratio") / 2).css("top", "50%");
else if (video.attr("height") > container.height() && container.height() * video.attr("ratio") < container.width())
video.css("height", container.height()).css("width", container.height() * video.attr("ratio")).parent().css("height", "100%").css("margin-top", "0").css("top", "0");
else
video.css("height", "").css("width", "").parent().css("height", video.attr("height")).css("margin-top", - video.attr("height") / 2).css("top", "50%");
}
function showPhoto() {
var width, height, photoSrc, videoSrc, previousPhoto, nextPhoto, nextLink, text;
var width, height, photoSrc, previousPhoto, nextPhoto, nextLink, text;
width = currentPhoto.size[0];
height = currentPhoto.size[1];
if (currentPhoto.mediaType == "video") {
$("#video-box-inner").empty();
if (!Modernizr.video) {
$('<div id="video-unsupported"><p>Sorry, your browser doesn\'t support the HTML5 &lt;video&gt; element!</p><p>Here\'s a <a href="http://caniuse.com/video">list of which browsers do</a>.</p></div>').appendTo('#video-box-inner');
}
else if (!Modernizr.video.h264) {
$('<div id="video-unsupported"><p>Sorry, your browser doesn\'t support the H.264 video format!</p></div>').appendTo('#video-box-inner');
} else {
$(window).unbind("resize", scaleVideo);
$(window).unbind("resize", scaleImage);
videoSrc = photoFloat.videoPath(currentAlbum, currentPhoto);
$('<video/>', { id: 'video', controls: true }).appendTo('#video-box-inner')
.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
.attr("src", videoSrc)
.attr("alt", currentPhoto.name)
.on('loadstart', scaleVideo);
}
$("head").append("<link rel=\"video_src\" href=\"" + videoSrc + "\" />");
$("#video-box-inner").css('height', height + 'px').css('margin-top', - height / 2);
$("#photo-box").hide();
$("#video-box").show();
if (width > height) {
height = height / width * maxSize;
width = maxSize;
} else {
width = currentPhoto.size[0];
height = currentPhoto.size[1];
if (width > height) {
height = height / width * maxSize;
width = maxSize;
} else {
width = width / height * maxSize;
height = maxSize;
}
$(window).unbind("resize", scaleVideo);
$(window).unbind("resize", scaleImage);
photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
$("#photo")
.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
.attr("src", photoSrc)
.attr("alt", currentPhoto.name)
.attr("title", currentPhoto.date)
.load(scaleImage);
$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
$("#video-box-inner").empty();
$("#video-box").hide();
$("#photo-box").show();
width = width / height * maxSize;
height = maxSize;
}
$(window).unbind("resize", scaleImage);
photoSrc = photoFloat.photoPath(currentAlbum, currentPhoto, maxSize, false);
$("#photo")
.attr("width", width).attr("height", height).attr("ratio", currentPhoto.size[0] / currentPhoto.size[1])
.attr("src", photoSrc)
.attr("alt", currentPhoto.name)
.attr("title", currentPhoto.date)
.load(scaleImage);
$("head").append("<link rel=\"image_src\" href=\"" + photoSrc + "\" />");
previousPhoto = currentAlbum.photos[
(currentPhotoIndex - 1 < 0) ? (currentAlbum.photos.length - 1) : (currentPhotoIndex - 1)
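showPhoto sizes the image so that its longer edge equals maxSize while preserving the aspect ratio. The same arithmetic in Python form, for reference (integer pixel sizes):

def fit_to_max(width, height, max_size=800):
    # scale so the longer edge equals max_size, preserving aspect ratio
    if width > height:
        return max_size, height * max_size // width
    return width * max_size // height, max_size

print(fit_to_max(4000, 3000))  # (800, 600)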
@@ -304,9 +257,9 @@ $(document).ready(function() {
currentPhoto = photo;
currentPhotoIndex = photoIndex;
setTitle();
showAlbum(previousAlbum !== currentAlbum);
if (photo !== null)
showPhoto();
showAlbum(previousAlbum !== currentAlbum);
}
/* Event listeners */
@@ -314,7 +267,6 @@ $(document).ready(function() {
$(window).hashchange(function() {
$("#loading").show();
$("link[rel=image_src]").remove();
$("link[rel=video_src]").remove();
photoFloat.parseHash(location.hash, hashParsed, die);
});
$(window).hashchange();
@@ -358,7 +310,7 @@ $(document).ready(function() {
$("#fullscreen-divider").show();
$("#fullscreen").show().click(function() {
$("#photo").fullScreen({callback: function(isFullscreen) {
maxSize = isFullscreen ? 1600 : 1024;
maxSize = isFullscreen ? 1024 : 800;
showPhoto();
}});
});


@@ -1,12 +0,0 @@
#!/bin/bash
# minify all .js-files
ls -1 *.js|grep -Ev "min.js$" | while read jsfile; do
newfile="${jsfile%.*}.min.js"
echo "$jsfile --> $newfile"
curl -X POST -s --data-urlencode "input@$jsfile" http://javascript-minifier.com/raw > $newfile
done
# merge all into one single file
rm -f scripts.min.js
cat *.min.js > scripts.min.js