Now using uselist=False to ensure file_details, dir_details and in_dir are one-to-one, so they no longer come back as a useless one-element list. Updated the TODO to reflect the order in which to tackle the changeover to paths and dirs.

2021-04-10 11:28:17 +10:00
parent 232c98b484
commit fddd47a1ab
5 changed files with 65 additions and 64 deletions
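For background on the pattern this commit adopts, here is a minimal, self-contained sketch of SQLAlchemy's uselist=False (a simplified stand-in schema for illustration, not the project's actual models; the entry/dir/file table layout and columns below are assumed). With uselist=False the relationship is loaded as a single object (or None) instead of a list, so code assigns and reads the attribute directly rather than appending to and indexing a one-element list.

# Minimal sketch (SQLAlchemy 1.4+ style) of the uselist=False pattern used in this commit.
# The schema below is a simplified stand-in, not the project's real models.
from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

# Association table linking an entry to the directory it lives in.
entry_dir_link = Table(
    "entry_dir_link", Base.metadata,
    Column("entry_id", Integer, ForeignKey("entry.id"), primary_key=True),
    Column("dir_id", Integer, ForeignKey("dir.id"), primary_key=True),
)

class Dir(Base):
    __tablename__ = "dir"
    id = Column(Integer, primary_key=True)
    path_prefix = Column(String)

class File(Base):
    __tablename__ = "file"
    id = Column(Integer, primary_key=True)
    size_mb = Column(Integer)
    entry_id = Column(Integer, ForeignKey("entry.id"))

class Entry(Base):
    __tablename__ = "entry"
    id = Column(Integer, primary_key=True)
    name = Column(String)
    # uselist=False makes each attribute a single related object (or None), not a list.
    file_details = relationship("File", uselist=False)
    in_dir = relationship("Dir", secondary=entry_dir_link, uselist=False)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    e = Entry(name="photo.jpg")
    e.file_details = File(size_mb=3)                 # plain assignment, no .append()
    e.in_dir = Dir(path_prefix="static/imports")     # likewise for the secondary-table link
    session.add(e)
    session.commit()
    print(e.in_dir.path_prefix + "/" + e.name)       # scalar access, no [0]

The commit applies exactly this switch to the existing Entry relationships and then removes the .append() and [0] plumbing throughout the module.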


@@ -95,9 +95,9 @@ class Entry(Base):
type_id = Column(Integer, ForeignKey("file_type.id"))
exists_on_fs=Column(Boolean)
type=relationship("FileType")
dir_details = relationship( "Dir")
file_details = relationship( "File" )
in_dir = relationship ("Dir", secondary="entry_dir_link" )
dir_details = relationship( "Dir", uselist=False )
file_details = relationship( "File", uselist=False )
in_dir = relationship ("Dir", secondary="entry_dir_link", uselist=False )
def __repr__(self):
return f"<id: {self.id}, name: {self.name}, type={self.type}, exists_on_fs={self.exists_on_fs}, dir_details={self.dir_details}, file_details={self.file_details}, in_dir={self.in_dir}>"
@@ -444,10 +444,10 @@ def AddDir(job, dirname, path_prefix, in_dir):
dir=Dir( path_prefix=path_prefix, num_files=0, last_import_date=0 )
dtype=session.query(FileType).filter(FileType.name=='Directory').first()
e=Entry( name=dirname, type=dtype, exists_on_fs=True )
-e.dir_details.append(dir)
+e.dir_details=dir
# no in_dir occurs when we Add the actual Dir for the import_path (top of the tree)
if in_dir:
-e.in_dir.append(in_dir)
+e.in_dir=in_dir
if DEBUG==1:
print(f"AddDir: created d={dirname}, pp={path_prefix}")
AddLogForJob(job, f"DEBUG: Process new dir: {dirname}")
@@ -463,8 +463,8 @@ def AddFile(job, fname, type_str, fsize, in_dir, year, month, day, woy ):
ftype = session.query(FileType).filter(FileType.name==type_str).first()
e=Entry( name=fname, type=ftype, exists_on_fs=True )
f=File( size_mb=fsize, last_hash_date=0, faces_created_on=0, year=year, month=month, day=day, woy=woy )
-e.file_details.append(f)
-e.in_dir.append(in_dir)
+e.file_details = f
+e.in_dir=in_dir
AddLogForJob(job, "Found new file: {}".format(fname) )
session.add(e)
return e
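One behavioural detail that goes with the AddDir/AddFile hunks above (standard SQLAlchemy semantics, not something spelled out in this diff): with uselist=False an unpopulated relationship reads back as None rather than an empty list, and assignment replaces .append(). Continuing the simplified stand-in models from the sketch above:

# Continuing the sketch above: an unset scalar relationship reads back as None, not [].
with Session(engine) as session:
    top = Entry(name="import_root")                  # hypothetical top-of-tree entry, no parent dir yet
    session.add(top)
    session.commit()
    assert top.in_dir is None                        # would have been [] before uselist=False
    top.in_dir = Dir(path_prefix="static/imports")   # assignment replaces .append()
    session.commit()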
@@ -643,23 +643,23 @@ def GenHashAndThumb(job, e):
# commit every 100 files to see progress being made but not hammer the database
if job.current_file_num % 100 == 0:
session.commit()
-stat = os.stat( e.in_dir[0].path_prefix + '/' + e.name )
-if stat.st_ctime < e.file_details[0].last_hash_date:
+stat = os.stat( e.in_dir.path_prefix + '/' + e.name )
+if stat.st_ctime < e.file_details.last_hash_date:
if DEBUG==1:
print(f"OPTIM: GenHashAndThumb {e.name} file is older than last hash, skip this")
job.current_file_num+=1
return
-e.file_details[0].hash = md5( job, e.in_dir[0].path_prefix+'/'+ e.name )
+e.file_details.hash = md5( job, e.in_dir.path_prefix+'/'+ e.name )
if DEBUG==1:
print( f"{e.name} - hash={e.file_details[0].hash}" )
print( f"{e.name} - hash={e.file_details.hash}" )
if e.type.name == 'Image':
-e.file_details[0].thumbnail = GenImageThumbnail( job, e.in_dir[0].path_prefix+'/'+ e.name )
+e.file_details.thumbnail = GenImageThumbnail( job, e.in_dir.path_prefix+'/'+ e.name )
elif e.type.name == 'Video':
-e.file_details[0].thumbnail = GenVideoThumbnail( job, e.in_dir[0].path_prefix+'/'+ e.name )
+e.file_details.thumbnail = GenVideoThumbnail( job, e.in_dir.path_prefix+'/'+ e.name )
elif e.type.name == 'Unknown':
job.current_file_num+=1
-e.file_details[0].last_hash_date = time.time()
+e.file_details.last_hash_date = time.time()
return
def ProcessAI(job, e):
@@ -667,29 +667,29 @@ def ProcessAI(job, e):
job.current_file_num+=1
return
-file = e.in_dir[0].path_prefix + '/' + e.name
+file = e.in_dir.path_prefix + '/' + e.name
stat = os.stat(file)
# find if file is newer than when we found faces before (fyi: first time faces_created_on == 0)
-if stat.st_ctime > e.file_details[0].faces_created_on:
+if stat.st_ctime > e.file_details.faces_created_on:
session.add(e)
im_orig = Image.open(file)
im = ImageOps.exif_transpose(im_orig)
faces = generateUnknownEncodings(im)
-e.file_details[0].faces_created_on=time.time()
+e.file_details.faces_created_on=time.time()
if faces:
flat_faces = numpy.array(faces)
-e.file_details[0].faces = flat_faces.tobytes()
+e.file_details.faces = flat_faces.tobytes()
else:
-e.file_details[0].faces = None
+e.file_details.faces = None
job.current_file_num+=1
return
else:
-if not e.file_details[0].faces:
+if not e.file_details.faces:
print("OPTIM: This image has no faces, skip it")
job.current_file_num+=1
return
-recover=numpy.frombuffer(e.file_details[0].faces,dtype=numpy.float64)
+recover=numpy.frombuffer(e.file_details.faces,dtype=numpy.float64)
real_recover=numpy.reshape(recover,(-1,128))
l=[]
for el in real_recover:
@@ -707,9 +707,9 @@ def lookForPersonInImage(job, person, unknown_encoding, e):
# lets see if we have tried this check before
frl=session.query(FileRefimgLink).filter(FileRefimgLink.file_id==e.id, FileRefimgLink.refimg_id==refimg.id).first()
if not frl:
-frl = FileRefimgLink(refimg_id=refimg.id, file_id=e.file_details[0].eid)
+frl = FileRefimgLink(refimg_id=refimg.id, file_id=e.file_details.eid)
else:
-stat=os.stat(e.in_dir[0].path_prefix+'/'+ e.name)
+stat=os.stat(e.in_dir.path_prefix+'/'+ e.name)
# file & refimg are not newer then we dont need to check
if frl.matched and stat.st_ctime < frl.when_processed and refimg.created_on < frl.when_processed:
print(f"OPTIM: lookForPersonInImage: file {e.name} has a previous match for: {refimg.fname}, and the file & refimg haven't changed")
@@ -757,7 +757,7 @@ def compareAI(known_encoding, unknown_encoding):
def ProcessFilesInDir(job, e, file_func):
if DEBUG==1:
print("DEBUG: files in dir - process: {} {}".format(e.name, e.in_dir[0].path_prefix))
print("DEBUG: files in dir - process: {} {}".format(e.name, e.in_dir.path_prefix))
if e.type.name != 'Directory':
file_func(job, e)
else:
@@ -870,10 +870,10 @@ def CheckForDups(job):
def RemoveFileFromFS( del_me ):
try:
settings = session.query(Settings).first()
-m=re.search( r'^static/(.+)', del_me.in_dir[0].path_prefix)
+m=re.search( r'^static/(.+)', del_me.in_dir.path_prefix)
dst_dir=settings.recycle_bin_path + m[1] + '/'
os.makedirs( dst_dir,mode=0o777, exist_ok=True )
-src=del_me.in_dir[0].path_prefix+'/'+del_me.name
+src=del_me.in_dir.path_prefix+'/'+del_me.name
dst=dst_dir + '/' + del_me.name
os.replace( src, dst )
except Exception as e:
@@ -903,18 +903,18 @@ def RemoveDups(job):
found=None
del_me_lst = []
for f in files:
-if os.path.isfile(f.in_dir[0].path_prefix+'/'+f.name) == False:
-AddLogForJob( job, f"ERROR: (per file del) file (DB id: {f.eid} - {f.in_dir[0].path_prefix}/{f.name}) does not exist? ignorning file")
-elif f.file_details[0].eid == int(keeping):
+if os.path.isfile(f.in_dir.path_prefix+'/'+f.name) == False:
+AddLogForJob( job, f"ERROR: (per file del) file (DB id: {f.eid} - {f.in_dir.path_prefix}/{f.name}) does not exist? ignorning file")
+elif f.file_details.eid == int(keeping):
found = f
else:
del_me_lst.append(f)
if found == None:
AddLogForJob( job, f"ERROR: (per file dup) Cannot find file with hash={hash} to process - skipping it)" )
else:
AddLogForJob(job, f"Keep duplicate file: {found.in_dir[0].path_prefix}/{found.name}" )
AddLogForJob(job, f"Keep duplicate file: {found.in_dir.path_prefix}/{found.name}" )
for del_me in del_me_lst:
AddLogForJob(job, f"Remove duplicate (per file dup) file: {del_me.in_dir[0].path_prefix}/{del_me.name}" )
AddLogForJob(job, f"Remove duplicate (per file dup) file: {del_me.in_dir.path_prefix}/{del_me.name}" )
RemoveFileFromFS( del_me )
RemoveFileFromDB(del_me.id)
@@ -929,9 +929,9 @@ def RemoveDups(job):
found=None
del_me=None
for f in files:
-if os.path.isfile(f.in_dir[0].path_prefix+'/'+f.name) == False:
-AddLogForJob( job, f"ERROR: (per path del) file (DB id: {f.eid} - {f.in_dir[0].path_prefix}/{f.name}) does not exist? ignorning file")
-if f.in_dir[0].eid == int(keeping):
+if os.path.isfile(f.in_dir.path_prefix+'/'+f.name) == False:
+AddLogForJob( job, f"ERROR: (per path del) file (DB id: {f.eid} - {f.in_dir.path_prefix}/{f.name}) does not exist? ignorning file")
+if f.in_dir.eid == int(keeping):
found=f
else:
del_me=f
@@ -939,8 +939,8 @@ def RemoveDups(job):
if found == None:
AddLogForJob( job, f"ERROR: (per path dup - dir id={keeping}) Cannot find file with hash={hash} to process - skipping it)" )
else:
AddLogForJob(job, f"Keep duplicate file: {found.in_dir[0].path_prefix}/{found.name}" )
AddLogForJob(job, f"Remove duplicate (per path dup) file: {del_me.in_dir[0].path_prefix}/{del_me.name}" )
AddLogForJob(job, f"Keep duplicate file: {found.in_dir.path_prefix}/{found.name}" )
AddLogForJob(job, f"Remove duplicate (per path dup) file: {del_me.in_dir.path_prefix}/{del_me.name}" )
RemoveFileFromFS( del_me )
RemoveFileFromDB(del_me.id)
dup_cnt += 1