Use a different timestamp for videos.

Easier to manipulate the timestamp via exiftool this way.
parent 1cc1ec02aa
commit 5cbc3f31e3
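The gallery's date handling previously relied only on the EXIF photo dates; this commit also reads the video container's creation_time tag (as reported by avprobe/ffprobe) and prefers it when present. Because that tag can be rewritten with exiftool, a wrong recording date can be corrected without touching the file itself on disk. Below is a minimal standalone sketch of the lookup the new _video_metadata() code performs; video_create_date() is a hypothetical helper, not part of this commit.

# Minimal sketch, not the project's code: shows how the container
# creation time (the value exiftool can rewrite) is read and parsed.
import json
import subprocess
from datetime import datetime

def video_create_date(path):
    # ffprobe/avprobe dump container and stream info as JSON,
    # using the same flags the scanner passes to VideoProbeWrapper
    out = subprocess.check_output(
        ['ffprobe', '-show_format', '-show_streams', '-of', 'json',
         '-loglevel', '0', path])
    info = json.loads(out)
    try:
        # same tag and format string the commit uses
        return datetime.strptime(info['format']['tags']['creation_time'],
                                 '%Y-%m-%d %H:%M:%S')
    except (KeyError, TypeError, ValueError):
        # tag missing, or written in another format (e.g. ISO 8601)
        return None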
@@ -238,14 +238,14 @@ class Photo(object):
             except KeyboardInterrupt:
                 raise
             except TypeError:
                 self._attributes["dateTimeOriginal"] = exif["DateTimeOriginal"]
         if "DateTime" in exif:
             try:
                 self._attributes["dateTime"] = datetime.strptime(exif["DateTime"], '%Y:%m:%d %H:%M:%S')
             except KeyboardInterrupt:
                 raise
             except TypeError:
                 self._attributes["dateTime"] = exif["DateTime"]
 
     _photo_metadata.flash_dictionary = {0x0: "No Flash", 0x1: "Fired",0x5: "Fired, Return not detected",0x7: "Fired, Return detected",0x8: "On, Did not fire",0x9: "On, Fired",0xd: "On, Return not detected",0xf: "On, Return detected",0x10: "Off, Did not fire",0x14: "Off, Did not fire, Return not detected",0x18: "Auto, Did not fire",0x19: "Auto, Fired",0x1d: "Auto, Fired, Return not detected",0x1f: "Auto, Fired, Return detected",0x20: "No flash function",0x30: "Off, No flash function",0x41: "Fired, Red-eye reduction",0x45: "Fired, Red-eye reduction, Return not detected",0x47: "Fired, Red-eye reduction, Return detected",0x49: "On, Red-eye reduction",0x4d: "On, Red-eye reduction, Return not detected",0x4f: "On, Red-eye reduction, Return detected",0x50: "Off, Red-eye reduction",0x58: "Auto, Did not fire, Red-eye reduction",0x59: "Auto, Fired, Red-eye reduction",0x5d: "Auto, Fired, Red-eye reduction, Return not detected",0x5f: "Auto, Fired, Red-eye reduction, Return detected"}
     _photo_metadata.light_source_dictionary = {0: "Unknown", 1: "Daylight", 2: "Fluorescent", 3: "Tungsten (incandescent light)", 4: "Flash", 9: "Fine weather", 10: "Cloudy weather", 11: "Shade", 12: "Daylight fluorescent (D 5700 - 7100K)", 13: "Day white fluorescent (N 4600 - 5400K)", 14: "Cool white fluorescent (W 3900 - 4500K)", 15: "White fluorescent (WW 3200 - 3700K)", 17: "Standard light A", 18: "Standard light B", 19: "Standard light C", 20: "D55", 21: "D65", 22: "D75", 23: "D50", 24: "ISO studio tungsten"}
@@ -258,26 +258,39 @@ class Photo(object):
 
 
     def _video_metadata(self, path, original=True):
         p = VideoProbeWrapper().call('-show_format', '-show_streams', '-of', 'json', '-loglevel', '0', path)
         if p == False:
             self.is_valid = False
             return
         info = json.loads(p)
         for s in info["streams"]:
             if 'codec_type' in s and s['codec_type'] == 'video':
                 self._attributes["mediaType"] = "video"
                 self._attributes["size"] = (int(s["width"]), int(s["height"]))
                 if "duration" in s:
                     self._attributes["duration"] = s["duration"]
                 if "tags" in s and "rotate" in s["tags"]:
                     self._attributes["rotate"] = s["tags"]["rotate"]
                 if original:
                     self._attributes["originalSize"] = (int(s["width"]), int(s["height"]))
+                # we break, because a video can contain several streams
+                # this way we only get/use values from the first stream
                 break
 
+        # use time from EXIF (rather than file creation)
+        if info['format']['tags']['creation_time']:
+            # we have time modifiable via exif
+            # lets use this
+            try:
+                self._attributes["videoCreateDate"] = datetime.strptime(info['format']['tags']['creation_time'], '%Y-%m-%d %H:%M:%S')
+            except KeyboardInterrupt:
+                raise
+            except TypeError:
+                pass
+
     def _photo_thumbnail(self, original_path, thumb_path, size, square=False):
         try:
             image = Image.open(original_path)
         except KeyboardInterrupt:
             raise
@@ -305,13 +318,13 @@ class Photo(object):
             # Vertical Mirror + Rotation 270
             mirror = image.transpose(Image.FLIP_LEFT_RIGHT).transpose(Image.ROTATE_270)
         elif self._orientation == 8:
             # Rotation 90
             mirror = image.transpose(Image.ROTATE_90)
 
         image = mirror
         self._thumbnail(image, original_path, thumb_path, size, square)
 
     def _thumbnail(self, image, original_path, thumb_path, size, square):
         thumb_path = os.path.join(thumb_path, image_cache(self._path, size, square))
         info_string = "%s -> %spx" % (os.path.basename(original_path), str(size))
         if square:
@@ -362,54 +375,54 @@ class Photo(object):
         except:
             pass
 
     def _photo_thumbnails(self, original_path, thumb_path):
         # get number of cores on the system, and use all minus one
         num_of_cores = os.sysconf('SC_NPROCESSORS_ONLN') - 1
         pool = Pool(processes=num_of_cores)
 
         try:
             for size in Photo.thumb_sizes:
                 pool.apply_async(make_photo_thumbs, args = (self, original_path, thumb_path, size))
         except:
             pool.terminate()
 
         pool.close()
         pool.join()
 
     def _video_thumbnails(self, thumb_path, original_path):
         (tfd, tfn) = tempfile.mkstemp();
         p = VideoTranscodeWrapper().call(
             '-i', original_path,    # original file to extract thumbs from
             '-f', 'image2',         # extract image
             '-vsync', '1',          # CRF
             '-vframes', '1',        # extrat 1 single frame
             '-an',                  # disable audio
             '-loglevel', 'quiet',   # don't display anything
             '-y',                   # don't prompt for overwrite
             tfn                     # temporary file to store extracted image
         )
         if p == False:
             message("couldn't extract video frame", os.path.basename(original_path))
             try:
                 os.unlink(tfn)
             except:
                 pass
             self.is_valid = False
             return
         try:
             image = Image.open(tfn)
         except KeyboardInterrupt:
             try:
                 os.unlink(tfn)
             except:
                 pass
             raise
         except:
             message("couldn't open video thumbnail", tfn)
             try:
                 os.unlink(tfn)
             except:
                 pass
             self.is_valid = False
             return
         mirror = image
@@ -424,14 +437,14 @@ class Photo(object):
             if size[1]:
                 self._thumbnail(mirror, original_path, thumb_path, size[0], size[1])
         try:
             os.unlink(tfn)
         except:
             pass
 
     def _video_transcode(self, transcode_path, original_path):
         transcode_path = os.path.join(transcode_path, video_cache(self._path))
         # get number of cores on the system, and use all minus one
         num_of_cores = os.sysconf('SC_NPROCESSORS_ONLN') - 1
         transcode_cmd = [
             '-i', original_path,    # original file to be encoded
             '-c:v', 'libx264',      # set h264 as videocodec
@@ -471,28 +484,28 @@ class Photo(object):
             transcode_cmd.append('-vf')
             transcode_cmd.append(','.join(filters))
 
         tmp_transcode_cmd = transcode_cmd[:]
         transcode_cmd.append(transcode_path)
         p = VideoTranscodeWrapper().call(*transcode_cmd)
         if p == False:
             # add another option, try transcoding again
             # done to avoid this error;
             # x264 [error]: baseline profile doesn't support 4:2:2
             message("transcoding failure, trying yuv420p", os.path.basename(original_path))
             tmp_transcode_cmd.append('-pix_fmt')
             tmp_transcode_cmd.append('yuv420p')
             tmp_transcode_cmd.append(transcode_path)
             p = VideoTranscodeWrapper().call(*tmp_transcode_cmd)
 
         if p == False:
             message("transcoding failure", os.path.basename(original_path))
             try:
                 os.unlink(transcode_path)
             except:
                 pass
             self.is_valid = False
             return
         self._video_metadata(transcode_path, False)
 
     @property
     def name(self):
@@ -518,7 +531,9 @@ class Photo(object):
         correct_date = None;
         if not self.is_valid:
             correct_date = datetime(1900, 1, 1)
-        if "dateTimeOriginal" in self._attributes:
+        if "videoCreateDate" in self._attributes:
+            correct_date = self._attributes["videoCreateDate"]
+        elif "dateTimeOriginal" in self._attributes:
             correct_date = self._attributes["dateTimeOriginal"]
         elif "dateTime" in self._attributes:
             correct_date = self._attributes["dateTime"]

@@ -7,14 +7,14 @@ class VideoToolWrapper(object):
         path = args[-1]
         for tool in self.wrappers:
             try:
                 if self.check_output:
                     p = subprocess.check_output((tool,) + args)
                 else:
                     p = subprocess.call((tool,) + args)
                     if p > 0:
                         return False
                     else:
                         return "SUCCESS"
             except KeyboardInterrupt:
                 if self.cleanup:
                     self.remove(path)
@@ -37,11 +37,11 @@ class VideoToolWrapper(object):
 class VideoTranscodeWrapper(VideoToolWrapper):
     def __init__(self):
         self.wrappers = ['avconv', 'ffmpeg']
         self.check_output = False
         self.cleanup = True
 
 class VideoProbeWrapper(VideoToolWrapper):
     def __init__(self):
         self.wrappers = ['avprobe', 'ffprobe']
         self.check_output = True
         self.cleanup = False
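With the date() change above, a video's sort date now resolves in the order videoCreateDate, dateTimeOriginal, dateTime. A small hypothetical illustration of that priority follows; the attribute names mirror the diff, the values are invented.

# Hypothetical illustration of the new date() priority; not project code.
from datetime import datetime

attributes = {
    "dateTime": datetime(2015, 6, 1, 10, 0, 0),             # EXIF DateTime fallback
    "videoCreateDate": datetime(2014, 12, 24, 18, 30, 0),   # container creation_time
}

if "videoCreateDate" in attributes:
    correct_date = attributes["videoCreateDate"]
elif "dateTimeOriginal" in attributes:
    correct_date = attributes["dateTimeOriginal"]
elif "dateTime" in attributes:
    correct_date = attributes["dateTime"]

print(correct_date)  # 2014-12-24 18:30:00 -- the container tag wins

So rewriting the container's creation date (for example with exiftool, assuming the value is stored in a tag such as QuickTime CreateDate for MP4/MOV files) and re-scanning the album is enough to change where the video sorts.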