diff --git a/TODO b/TODO
index b3a543c..a9c4683 100644
--- a/TODO
+++ b/TODO
@@ -5,6 +5,9 @@
 #
 # 5 think I killed pa_job_manager without passing an eid to a transform job, shouldn't crash
 # SHOULD JUST get AI to help clean-up and write defensive code here...
+#
+# could cache getPage into document.page[x], then check if it exists; if so, don't go back to the server for all the data. page[x] would need -> entrylist, pagelist, entries. AND we need to consider whether the cache can be invalidated -- then how do I know / do I care? I THINK not, as the data is all self-contained; it only goes pear shaped if we get a new page, and then it should get a new set of entry ids -- now, if they are not fully contained in entrylist, we have a problem -- IS that the condition I should check?
+## the above needs thought: even without a cache, I go back a dir and it's now deleted, or I go forward/back a page and the entry ids have changed -- is there any way this can bite me? I think only if, in flat view, the next/prev page of ids has no content / unexpected content. get_dir_eids is a new hit each time, so it could fail for deleted folders, etc.
 ###
 ### major fix - go to everywhere I call GetEntries(), and redo the logic totally...
 
diff --git a/files.py b/files.py
index 135b3a8..606e1a7 100644
--- a/files.py
+++ b/files.py
@@ -512,7 +512,8 @@ def search(search_term):
     OPT=States( request )
     OPT.search_term = search_term
     query_data=GetSearchQueryData( OPT )
-    return render_template("files.html", page_title='View Files', search_term=search_term, query_data=query_data, OPT=OPT )
+    js_vers = getVersions()
+    return render_template("files.html", page_title='View Files', search_term=search_term, query_data=query_data, OPT=OPT, js_vers=js_vers )
 
 ################################################################################
 # /files/scan_ip -> allows us to force a check for new files
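A minimal sketch of the getPage cache idea from the TODO hunk above, in Python for readability; `PageCache`, `fetch_page`, and `current_eids` are hypothetical stand-ins, since the real cache would live client-side in `document.page[x]`, and `get_dir_eids` is taken from the TODO only as the source of fresh ids. It shows one reading of the containment check the TODO asks about: keep a cached page only while its entry ids are still a subset of the live id set.

```python
# Sketch only: names are assumptions, not the project's actual API.

class PageCache:
    def __init__(self, fetch_page):
        self._fetch_page = fetch_page  # server hit: returns (entrylist, pagelist, entries)
        self._pages = {}               # page index -> cached (entrylist, pagelist, entries)

    def get_page(self, x, current_eids):
        """Return page x, skipping the server when a still-valid copy is cached.

        current_eids is the fresh id set (e.g. from get_dir_eids); the TODO's
        proposed invalidation condition is containment of the cached ids in it.
        """
        cached = self._pages.get(x)
        if cached is not None:
            entrylist, _pagelist, _entries = cached
            if set(entrylist) <= set(current_eids):
                return cached          # cache hit and all cached ids still exist
            del self._pages[x]         # ids drifted (deleted dir, re-paged flat view)
        fresh = self._fetch_page(x)
        self._pages[x] = fresh
        return fresh
```

On a containment failure the sketch simply drops that one page and refetches, so a deleted dir or a re-paged flat view degrades to the existing no-cache behaviour instead of serving stale entries.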