First pass/thoughts on faces-per-file in DB, mostly commented out. Also put a poor hack in to allow AI jobs to re-run without error; it's limited — this really needs to be fixed properly.

This commit is contained in:
2021-01-24 00:16:58 +11:00
parent bce7d2a2d2
commit 56f19ff3b7
2 changed files with 31 additions and 7 deletions

View File

@@ -103,6 +103,9 @@ class File(Base):
size_mb = Column(Integer, unique=False, nullable=False)
hash = Column(Integer, unique=True, nullable=True)
thumbnail = Column(String, unique=False, nullable=True)
# DDP: need bytea? in db (see other DDP comment)
# faces =
faces_created_on = Column(Float)
def __repr__(self):
    # Debug-friendly summary of the file row: entity id, size, and content hash.
    return f"<eid: {self.eid}, size_mb={self.size_mb}, hash={self.hash}>"
@@ -478,6 +481,10 @@ def JobImportDir(job):
return
def JobProcessAI(job):
print ("DDP: HACK - to allow re-running jobs for now, del FPL");
session.query(File_Person_Link).delete()
#### (delete the above 2 lines)
path=[jex.value for jex in job.extra if jex.name == "path"][0]
path = SymlinkName(path, '/')
print('REMOVE AFTER TESTING ON WINDOWS... path=',path)
@@ -512,11 +519,26 @@ def ProcessAI(job, e):
generateKnownEncodings(person)
file = e.in_dir[0].path_prefix + '/' + e.name
im_orig = Image.open(file)
im = ImageOps.exif_transpose(im_orig)
unknown_encodings = generateUnknownEncodings(im)
stat = os.stat(file)
# only find faces if we have not already OR file is newer than when we found faces before
if not e.file_details[0].faces_created_on or stat.st_ctime > e.file_details[0].faces_created_on:
session.add(e)
im = Image.open(file)
try:
im = ImageOps.exif_transpose(im)
except:
print("DEBUG: looks like image does not have exif")
for unknown_encoding in unknown_encodings:
faces = generateUnknownEncodings(im)
# DDP: uncomment the below to optimise, but I need to store the faces into the DB, not sure how right now
##### is this really 0? or will there be many with the many faces?
# if its many, should we do a faces_file_link???
# e.file_details[0].faces = faces[0].tobytes()
# e.file_details[0].faces_created_on=time.time()
# else:
# faces=numpy.frombuffer(e.file_details[0].faces,dtype=numpy.float64)
for unknown_encoding in faces:
for person in people:
lookForPersonInImage(job, person, unknown_encoding, e)
AddLogForJob(job, f"Finished processing {e.name}", e.name )
@@ -524,9 +546,11 @@ def ProcessAI(job, e):
def lookForPersonInImage(job, person, unknown_encoding, e):
for refimg in person.refimg:
###
# need a date_stamp in refimg_file_link, but we currently have a person_file_link
# should consider whether we break this into just a scan ( id, refimg, file, date, threshold, etc.)
###
deserialized_bytes = numpy.frombuffer(refimg.encodings, dtype=numpy.float64)
#deserialized_x = numpy.reshape(deserialized_bytes, newshape=(2,2))
results = compareAI(deserialized_bytes, unknown_encoding)
if results[0]:
print(f'Found a match between: {person.tag} and {e.name}')