Rewrote dups.html to use the newer model: duplicates whose path matches the regexp are auto-deleted and no longer shown in a per-row view. Also removed an extra, unneeded line when processing file deletions.
dups.py | 15
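The "newer model" mentioned above keys off a date-style directory pattern to pick which copy to keep. A minimal sketch of that idea, assuming a hypothetical helper name (pick_preferred) and plain path strings; the actual commit works on self.dups_to_process[hash] entries with a .d directory attribute, as shown in the diff below:

    import re

    # Copies whose directory looks like YYYY/YYYYMMDD (e.g. "2019/20190412")
    # are treated as the canonical location; the rest can be auto-deleted.
    DATED_DIR = re.compile(r'\d{4}/\d{8}')

    def pick_preferred(paths):
        """Return (keep, delete_list) for one set of duplicate file paths.

        Hypothetical helper illustrating the regexp-based auto-delete model;
        the commit's real logic lives in the Duplicates class and dups.html.
        """
        keep = next((p for p in paths if DATED_DIR.search(p)), paths[0])
        return keep, [p for p in paths if p is not keep]

    # Example: the dated copy wins, the loose copy is queued for deletion.
    keep, to_delete = pick_preferred(["misc/IMG_1.jpg", "2019/20190412/IMG_1.jpg"])
    assert keep == "2019/20190412/IMG_1.jpg"
    assert to_delete == ["misc/IMG_1.jpg"]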
@@ -209,28 +209,18 @@ class Duplicates:
         d2=""
         did1=""
         did2=""
         str=""
         dup_cnt=1
         hashes=""
         for hash in self.dups_to_process:
             if self.overall_dup_cnt<2:
                 print(f"process {hash}")
-            # more than 2 files (just ask per file) OR
-            # only 2 copies, and files are in same dir (so must be diff name, so just ask) OR
-            # content same, filename different (just ask per file)
+            # more than 2 files (just ask per file) OR only 2 copies, and files are in same dir (so must be diff name, so just ask) OR content same, filename different (ask per file)
             if (len(self.dups_to_process[hash]) > 2) or (self.dups_to_process[hash][0].f != self.dups_to_process[hash][1].f) or (self.dups_to_process[hash][0].d == self.dups_to_process[hash][1].d):
                 self.per_file_dups.append(self.dups_to_process[hash])
                 self.overall_dup_cnt += len(self.dups_to_process[hash])
                 self.overall_dup_sets += 1
                 if self.overall_dup_cnt<2:
                     print( f"process as len(el)={len(self.dups_to_process[hash])}" )
                 for el in self.dups_to_process[hash]:
                     if re.search( '\d{4}/\d{8}', el.d):
                         self.preferred_file[hash] = el.id
                 if self.overall_dup_cnt<25:
                     print( f"{self.dups_to_process[hash]} <- keeping {el.id} -- {self.preferred_file[hash]}" )
-            # by here we have only 2 files, with the same name, different path
-            # (MOST COMMON, and I think we dont care per file, just per path)
+            # by here we have only 2 files, with the same name, different path (ask per path)
             elif d1 != self.dups_to_process[hash][0].d:
                 if d1 != '':
                     self.overall_dup_cnt += dup_cnt
@@ -245,7 +235,6 @@ class Duplicates:
                 d2 = self.dups_to_process[hash][1].d
                 did1 = self.dups_to_process[hash][0].did
                 did2 = self.dups_to_process[hash][1].did
                 str=f"duplicates found in {d1} and {d2}"
                 hashes = f"{hash},"
             else:
                 dup_cnt += 1
-
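For reference, the branch condition in the first hunk encodes three "ask per file" cases; everything else falls through to the per-path branch. A standalone restatement of that rule, assuming each duplicate entry carries a filename f and directory d as in the diff (Entry and needs_per_file_prompt are names invented here for illustration):

    from collections import namedtuple

    # Mirrors the .f (filename) and .d (directory) attributes used in the diff.
    Entry = namedtuple("Entry", "f d")

    def needs_per_file_prompt(dups):
        """True when duplicates must be resolved file by file:
        - more than 2 copies, or
        - 2 copies with different filenames (same content, different name), or
        - 2 copies in the same directory (so the names must differ).
        Otherwise exactly 2 same-named copies sit in different directories,
        and the decision can be made once per directory pair.
        Assumes at least two entries, as the diff does.
        """
        if len(dups) > 2:
            return True
        a, b = dups
        return a.f != b.f or a.d == b.d

    # Two same-named copies in different directories: handled per path, not per file.
    pair = [Entry("IMG_1.jpg", "2019/20190412"), Entry("IMG_1.jpg", "backup")]
    assert not needs_per_file_prompt(pair)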