testing maybe slower/dumber but also maybe more *correct* rebuilding of pages after actions

merge-requests/341/head
Thomas Lynch 2 years ago
parent 7adaacc945
commit 440298a44d
  1. db/posts.js (80)
  2. helpers/affectedboards.js (10)
  3. models/forms/actionhandler.js (219)
  4. models/forms/makepost.js (2)

@ -13,14 +13,31 @@ module.exports = {
db,
getThreadPage: async (board, thread) => {
const threadsBefore = await db.countDocuments({
'board': board,
'thread': null,
'bumped': {
'$gte': thread.bumped
const threadsBefore = await db.aggregate([
{
'$match': {
'thread': null,
'board': board,
}
}, {
'$project': {
'sticky': 1,
'bumped': 1,
'postId': 1,
'board': 1,
'thread': 1
}
}, {
'$sort': {
'sticky': -1,
'bumped': -1
}
}
});
return Math.ceil(threadsBefore/10) || 1; //1 because 0 threads before is page 1
]).toArray();
//is there a way to do this in the db with an aggregation stage, instead of in js?
const threadIndex = threadsBefore.findIndex((e) => e.postId === thread);
const threadPage = Math.max(1, Math.ceil((threadIndex+1)/10));
return threadPage;
},
getBoardRecent: async (offset=0, limit=20, ip, board, permissions) => {
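Aside: the question in the comment above ("is there a way to do this in the db with an aggregation stage, instead of in js?") could plausibly be answered with $setWindowFields. A minimal sketch, assuming MongoDB 5.0+ and the same collection/fields used above; this is not part of the commit, just an illustration:

//sketch only: compute the thread's page inside the aggregation instead of with findIndex in js
const result = await db.aggregate([
	{ '$match': { 'thread': null, 'board': board } },
	{ '$setWindowFields': {
		'sortBy': { 'sticky': -1, 'bumped': -1 },
		'output': { 'index': { '$documentNumber': {} } }, //1-based position in the board ordering
	} },
	{ '$match': { 'postId': thread } },
	{ '$project': { 'page': { '$ceil': { '$divide': ['$index', 10] } } } },
]).toArray();
return result.length > 0 ? result[0].page : 1; //1 if the thread wasnt found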
@ -617,6 +634,55 @@ module.exports = {
return oldThreads.concat(early404Threads);
},
getMinimalThreads: (boards) => {
return db.aggregate([
{
'$match': {
'thread': null,
'board': {
'$in': boards,
}
}
}, {
'$project': {
'sticky': 1,
'bumped': 1,
'postId': 1,
'board': 1,
'thread': 1,
}
}, {
'$sort': {
'sticky': -1,
'bumped': -1,
}
}, {
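//group the ordered threads into one array per board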
'$group': {
'_id': '$board',
'posts': {
'$push': '$$CURRENT',
}
}
}, {
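//collapse the per-board groups into k/v pairs so $arrayToObject can build the final object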
'$group': {
'_id': null,
'posts': {
'$push': {
'k': '$_id',
'v': '$posts',
}
}
}
}, {
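//replace the root with a single object keyed by board name, i.e. { boardName: [threads...] }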
'$replaceRoot': {
'newRoot': {
'$arrayToObject': '$posts',
}
}
}
]).toArray().then(r => r[0]);
},
fixLatest: (boards) => {
return db.aggregate([
{

@ -22,15 +22,15 @@ module.exports = async (posts, deleting) => {
boardThreadMap[post.board].threads.add(threadId);
}
const beforePages = {};
const threadBoards = Object.keys(boardThreadMap);
const numPagesBeforeActions = {};
const affectedBoardNames = Object.keys(boardThreadMap);
//get the number of pages for each board before the actions, for deleting old pages and adjusting page nav numbers in case the number of pages changes
if (deleting) {
await Promise.all(threadBoards.map(async board => {
beforePages[board] = Math.ceil((await Posts.getPages(board)) / 10);
await Promise.all(affectedBoardNames.map(async board => {
numPagesBeforeActions[board] = Math.ceil((await Posts.getPages(board)) / 10);
}));
}
return { boardThreadMap, beforePages, threadBoards };
return { boardThreadMap, numPagesBeforeActions, affectedBoardNames };
}
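For reference, the object returned above ends up shaped roughly like this (the variable name, board names and numbers are made up for illustration; threads/directThreads are the Sets built earlier in this file):

//illustrative only: rough shape of the resolved value
const affected = {
	boardThreadMap: {
		'b': { threads: new Set([123, 456]), directThreads: new Set([123]) },
		'c': { threads: new Set([789]), directThreads: new Set() },
	},
	numPagesBeforeActions: { 'b': 3, 'c': 1 }, //only filled in when deleting
	affectedBoardNames: ['b', 'c'],
};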

@ -25,6 +25,7 @@ const { Posts, Boards, Modlogs } = require(__dirname+'/../../db/')
module.exports = async (req, res, next) => {
//try to set a good redirect
let redirect = req.headers.referer;
if (!redirect) {
if (!req.params.board) {
@ -34,23 +35,23 @@ module.exports = async (req, res, next) => {
}
}
//if user isnt staff, and they put an action that requires password, e.g. delete/spoiler, then filter posts to only matching password
/*
Handle checking passwords (in constant time) when doing actions that require a password.
Staff skip this section because they don't need passwords to do such actions.
*/
const isStaffOrGlobal = res.locals.permissions.hasAny(Permissions.MANAGE_GLOBAL_GENERAL, Permissions.MANAGE_BOARD_GENERAL);
if (!isStaffOrGlobal && res.locals.actions.numPasswords > 0) {
let passwordPosts = [];
if (req.body.postpassword && req.body.postpassword.length > 0) {
//hash their input and make it a buffer
const inputPasswordHash = createHash('sha256').update(postPasswordSecret + req.body.postpassword).digest('base64');
const inputPasswordBuffer = Buffer.from(inputPasswordHash);
passwordPosts = res.locals.posts.filter(post => {
if (post.password != null) { //null password doesnt matter for timing attack, it cant be deleted by non-staff
const postBuffer = Buffer.from(post.password);
//returns true and passes filter if password matched. constant time compare
return timingSafeEqual(inputPasswordBuffer, postBuffer);
}
});
}
//no posts matched password, reject
if (passwordPosts.length === 0) {
return dynamicResponse(req, res, 403, 'message', {
'title': 'Forbidden',
@ -58,13 +59,17 @@ module.exports = async (req, res, next) => {
redirect,
});
}
//if the password is correct for at least *some* posts, silently ignore the wrong ones (dont action them), and continue.
res.locals.posts = passwordPosts;
}
//affected boards, list and page numbers
const deleting = req.body.delete || req.body.delete_ip_board || req.body.delete_ip_global || req.body.delete_ip_thread;
let { boardThreadMap, beforePages, threadBoards } = await getAffectedBoards(res.locals.posts, deleting);
//affected boards, their threads, and how many pages each one has before the actions
let { boardThreadMap, numPagesBeforeActions, affectedBoardNames } = await getAffectedBoards(res.locals.posts, deleting);
let minimalThreadsMap = await Posts.getMinimalThreads(affectedBoardNames);
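//minimalThreadsMap resolves to an object keyed by board name, with that board's threads in index page order, roughly (illustrative):
//{ 'b': [ { postId: 130, sticky: 1, bumped: <date>, board: 'b', thread: null }, { postId: 121, sticky: 0, ... }, ... ] }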
//adjust the redirect to go back to the thread if it was done from there
if (deleting
&& req.params.board
&& req.headers.referer
@ -78,8 +83,9 @@ module.exports = async (req, res, next) => {
const messages = [];
const modlogActions = []
const combinedQuery = {};
let aggregateNeeded = false;
// if getting global banned, board ban doesnt matter
let recalculateThreadMetadata = false;
//handle bans, independent of other actions
if (req.body.ban || req.body.global_ban || req.body.report_ban || req.body.global_report_ban) {
const { message, action, query } = await banPoster(req, res, next);
if (req.body.ban) {
@ -97,19 +103,20 @@ module.exports = async (req, res, next) => {
}
messages.push(message);
}
if (deleting) {
//OP delete protection. for old OPs or with a lot of replies
if (!isStaffOrGlobal) {
//OP delete protection, for old OPs or OPs with many replies
const { deleteProtectionAge, deleteProtectionCount } = res.locals.board.settings;
if (deleteProtectionAge > 0 || deleteProtectionCount > 0) {
const protectedThread = res.locals.posts.some(p => {
return p.thread === null //is a thread
&& ((deleteProtectionCount > 0 && p.replyposts > deleteProtectionCount) //and it has more replies than the protection count
|| (deleteProtectionAge > 0 && new Date() > new Date(p.date.getTime() + deleteProtectionAge))); //or was created too long ato
|| (deleteProtectionAge > 0 && new Date() > new Date(p.date.getTime() + deleteProtectionAge))); //or was created too long ago
});
if (protectedThread === true) {
//alternatively, the above .some() could become a filter like some other options and silently not delete,
//but i think in this case it would be important to notify the user that their own thread(s) cant be deleted yet
return dynamicResponse(req, res, 403, 'message', {
'title': 'Forbidden',
'error': 'You cannot delete old threads or threads with too many replies',
@ -118,7 +125,9 @@ module.exports = async (req, res, next) => {
}
}
}
const postsBefore = res.locals.posts.length;
if (req.body.delete_ip_board || req.body.delete_ip_global || req.body.delete_ip_thread) {
const deletePostIps = res.locals.posts.map(x => x.ip.cloak);
const deletePostMongoIds = res.locals.posts.map(x => x._id)
@ -153,11 +162,10 @@ module.exports = async (req, res, next) => {
}
if (res.locals.posts.length > postsBefore) {
//recalc for extra fetched posts
const updatedAffected = await getAffectedBoards(res.locals.posts, deleting);
boardThreadMap = updatedAffected.boardThreadMap;
beforePages = updatedAffected.beforePages;
threadBoards = updatedAffected.threadBoards;
({ boardThreadMap, numPagesBeforeActions, affectedBoardNames } = await getAffectedBoards(res.locals.posts, deleting));
minimalThreadsMap = await Posts.getMinimalThreads(affectedBoardNames);
}
if (req.body.delete_file) {
const { message } = await deletePostsFiles(res.locals.posts, false); //delete files, not just unlink
messages.push(message);
@ -172,9 +180,11 @@ module.exports = async (req, res, next) => {
} else if (req.body.delete_ip_global) {
modlogActions.push('Global delete by IP');
}
aggregateNeeded = true;
recalculateThreadMetadata = true;
}
} else if (req.body.move) {
if (boardThreadMap[req.params.board].directThreads.size > 0) {
const threadIds = [...boardThreadMap[req.params.board].directThreads];
const fetchMovePosts = await Posts.db.find({
@ -188,10 +198,12 @@ module.exports = async (req, res, next) => {
const { message, action } = await movePosts(req, res);
if (action) {
modlogActions.push('Moved');
aggregateNeeded = true;
recalculateThreadMetadata = true;
}
messages.push(message);
} else {
// if it was getting deleted/moved, dont do these actions
if (req.body.unlink_file || req.body.delete_file) {
const { message, action, query } = await deletePostsFiles(res.locals.posts, req.body.unlink_file);
@ -201,7 +213,7 @@ module.exports = async (req, res, next) => {
} else if (req.body.delete_file) {
modlogActions.push('Delete files');
}
aggregateNeeded = true;
recalculateThreadMetadata = true;
combinedQuery[action] = { ...combinedQuery[action], ...query}
}
messages.push(message);
@ -266,7 +278,10 @@ module.exports = async (req, res, next) => {
}
messages.push(message);
}
}
//execute the actions from the resulting combined query in one shot
if (Object.keys(combinedQuery).length > 0) {
await Posts.db.updateMany({
'_id': {
@ -275,12 +290,12 @@ module.exports = async (req, res, next) => {
}, combinedQuery);
}
//fetch boards for templates if necessary. can be multiple boards from global actions
let buildBoards = {};
//get all affected boards for templates if necessary. can be multiple boards from global actions
if (modlogActions.length > 0 || res.locals.actions.numBuild > 0) {
buildBoards = (await Boards.db.find({
'_id': {
'$in': threadBoards
'$in': affectedBoardNames
},
}).toArray()).reduce((acc, curr) => {
if (!acc[curr._id]) {
@ -328,8 +343,8 @@ module.exports = async (req, res, next) => {
});
}
const modlogDocuments = [];
for (let i = 0; i < threadBoards.length; i++) {
const boardName = threadBoards[i];
for (let i = 0; i < affectedBoardNames.length; i++) {
const boardName = affectedBoardNames[i];
const boardLog = modlog[boardName];
//make it into documents for the db
modlogDocuments.push({
@ -340,8 +355,8 @@ module.exports = async (req, res, next) => {
if (modlogDocuments.length > 0) {
//insert the modlog docs
await Modlogs.insertMany(modlogDocuments);
for (let i = 0; i < threadBoards.length; i++) {
const board = buildBoards[threadBoards[i]];
for (let i = 0; i < affectedBoardNames.length; i++) {
const board = buildBoards[affectedBoardNames[i]];
buildQueue.push({
'task': 'buildModLog',
'options': {
@ -361,10 +376,10 @@ module.exports = async (req, res, next) => {
//if there are actions that can cause some rebuilding
if (res.locals.actions.numBuild > 0) {
//make it into an OR query for the db
//Make a map of all the unique threads: the parent threads of any selected posts, plus directly selected threads
const queryOrs = [];
for (let i = 0; i < threadBoards.length; i++) {
const threadBoard = threadBoards[i];
for (let i = 0; i < affectedBoardNames.length; i++) {
const threadBoard = affectedBoardNames[i];
//convert this to an array while we are here
boardThreadMap[threadBoard].threads = [...boardThreadMap[threadBoard].threads];
boardThreadMap[threadBoard].directThreads = [...boardThreadMap[threadBoard].directThreads];
@ -375,8 +390,6 @@ module.exports = async (req, res, next) => {
}
})
}
//fetch threads per board that we only checked posts for
let threadsEachBoard = [];
if (queryOrs.length > 0) {
threadsEachBoard = await Posts.db.find({
@ -384,46 +397,21 @@ module.exports = async (req, res, next) => {
'$or': queryOrs
}).toArray();
}
//combine it with what we already had
const selectedThreads = res.locals.posts.filter(post => post.thread === null)
threadsEachBoard = threadsEachBoard.concat(selectedThreads)
//get the oldest and newest thread for each board to determine how to delete
let threadBounds = threadsEachBoard.reduce((acc, curr) => {
if (!acc[curr.board] || curr.bumped < acc[curr.board].bumped) {
acc[curr.board] = { oldest: null, newest: null};
}
if (!acc[curr.board].oldest || curr.bumped < acc[curr.board].oldest.bumped) {
acc[curr.board].oldest = curr;
}
if (!acc[curr.board].newest || curr.bumped > acc[curr.board].newest.bumped) {
acc[curr.board].newest = curr;
}
return acc;
}, {});
threadsEachBoard = threadsEachBoard.concat(selectedThreads);
if (aggregateNeeded) {
//recalculate replies and image counts if necessary
//recalculate replies and image counts if necessary
if (recalculateThreadMetadata) {
const selectedPosts = res.locals.posts.filter(p => p.thread !== null);
if (selectedPosts.length > 0) {
/* ignore
let threadOrs = selectedPosts.map(p => ({ board: p.board, postId: p.thread }));
let replyOrs = selectedPosts.map(p => ({ board: p.board, thread: p.thread }));
const [ threads, threadReplyAggregates] = await Promise.all([
Posts.db.find({ '$or': threadOrs }), //i think this is in threadsEachBoard already
Posts.getThreadAggregates(threadOrs)
]);
*/
let replyOrs = selectedPosts.map(p => ({ board: p.board, thread: p.thread }));
const replyOrs = selectedPosts.map(p => ({ board: p.board, thread: p.thread }));
const threadReplyAggregates = await Posts.getThreadAggregates(replyOrs);
const bulkWrites = [];
const threads = threadsEachBoard;
for (let i = 0; i < threads.length; i++) {
const replyAggregate = threadReplyAggregates.find(ra => ra._id.thread === threads[i].postId && ra._id.board === threads[i].board);
if (!replyAggregate) {
//thread no longer has any reply post/files, set to 0 and reset bump date to post date.
//sage replies and bumplock wouldnt matter in that case
//thread no longer has any reply post/files, set to 0 and reset bump date to post date
bulkWrites.push({
'updateOne': {
'filter': {
@ -439,13 +427,7 @@ module.exports = async (req, res, next) => {
}
}
});
//threadbound already fixed for this
} else {
if (replyAggregate.bumped < threadBounds[replyAggregate._id.board].oldest.bumped) {
threadBounds[replyAggregate._id.board].oldest = { bumped: replyAggregate.bumped };
} else if (replyAggregate.bumped < threadBounds[replyAggregate._id.board].newest.bumped) {
threadBounds[replyAggregate._id.board].newest = { bumped: replyAggregate.bumped };
}
//use results from first aggregate for threads with replies still existing
const aggregateSet = {
'replyposts': replyAggregate.replyposts,
@ -471,14 +453,37 @@ module.exports = async (req, res, next) => {
await Posts.db.bulkWrite(bulkWrites);
}
}
//afterwards, fix webring and board list latest post activity now. based on last bump date of a non bumplocked thread
await Posts.fixLatest(threadBoards);
await Posts.fixLatest(affectedBoardNames);
}
for (let i = 0; i < threadBoards.length; i++) {
const boardName = threadBoards[i];
const bounds = threadBounds[boardName];
/*
Get minimal data for the threads of each affected board, used to get the page of a thread later.
Uses the proper ordering of threads, to account for stickies, bumplocks, etc.
Todo: is this even worth it, or would just rebuilding all pages be quicker?
*/
const pageBounds = threadsEachBoard.reduce((acc, t) => {
if (!acc[t.board]) { acc[t.board] = { first: null, last: null }; }
const threadIndex = minimalThreadsMap[t.board].findIndex(p => p.postId === t.postId);
const threadPage = Math.max(1, Math.ceil((threadIndex+1)/10));
if (!acc[t.board].first || threadPage < acc[t.board].first) {
acc[t.board].first = threadPage;
}
if (!acc[t.board].last || threadPage > acc[t.board].last) {
acc[t.board].last = threadPage;
}
return acc;
}, {});
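//pageBounds ends up like (illustrative): { 'b': { first: 1, last: 3 } }
//i.e. the lowest and highest index page containing an affected thread, per board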
for (let i = 0; i < affectedBoardNames.length; i++) {
//always assume catalog rebuild, gets set to false in specific cases later
let catalogRebuild = true;
//get the board data for build tasks, and highest/lowest affected pages for rebuilding
const boardName = affectedBoardNames[i];
const board = buildBoards[boardName];
//rebuild impacted threads
//rebuild destination thread for "move" action
if (req.body.move) {
buildQueue.push({
'task': 'buildThread',
@ -488,6 +493,8 @@ module.exports = async (req, res, next) => {
}
});
}
//rebuild affected threads
for (let j = 0; j < boardThreadMap[boardName].threads.length; j++) {
buildQueue.push({
'task': 'buildThread',
@ -497,14 +504,18 @@ module.exports = async (req, res, next) => {
}
});
}
//refresh any pages affected
const afterPages = Math.ceil((await Posts.getPages(boardName)) / 10);
let catalogRebuild = true;
if ((beforePages[boardName] && beforePages[boardName] !== afterPages) || req.body.move) { //handle moves here since dates would change and not work in old/new page calculations
if (afterPages < beforePages[boardName]) {
//amount of pages changed, rebuild all pages and delete any further pages (if pages amount decreased)
for (let k = beforePages[boardName]; k > afterPages; k--) {
//deleting html for pages that no longer should exist
//used to compare the number of pages after the actions. fetch from the db again if any action that can change the number of pages happened (delete and move)
let numPagesAfterActions = numPagesBeforeActions[boardName];
if (deleting || req.body.move) {
numPagesAfterActions = Math.ceil((await Posts.getPages(boardName)) / 10);
}
if ((numPagesBeforeActions[boardName] && numPagesBeforeActions[boardName] !== numPagesAfterActions) || req.body.move) {
//if number of pages changed, or doing a "move", rebuild all pages for simplicity and delete any pages that would no longer exist
if (numPagesAfterActions < numPagesBeforeActions[boardName]) {
for (let k = numPagesBeforeActions[boardName]; k > numPagesAfterActions; k--) {
parallelPromises.push(remove(`${uploadDirectory}/html/${boardName}/${k}.html`));
parallelPromises.push(remove(`${uploadDirectory}/json/${boardName}/${k}.json`));
}
@ -514,22 +525,26 @@ module.exports = async (req, res, next) => {
'options': {
'board': board,
'startpage': 1,
'endpage': afterPages,
'endpage': numPagesAfterActions,
}
});
} else {
//number of pages did not change, only possibly building existing pages
const threadPageOldest = await Posts.getThreadPage(boardName, bounds.oldest);
const threadPageNewest = bounds.oldest.postId === bounds.newest.postId ? threadPageOldest : await Posts.getThreadPage(boardName, bounds.newest);
//build between pages
const rebuildPageFirst = pageBounds[boardName].first;
const rebuildPageLast = pageBounds[boardName].last;
if (deleting) {
if (boardThreadMap[boardName].directThreads.length === 0) {
//only deleting posts from threads, so thread order wont change, thus we dont delete all pages after
buildQueue.push({
'task': 'buildBoardMultiple',
'options': {
'board': board,
'startpage': threadPageNewest,
'endpage': threadPageOldest,
'startpage': rebuildPageFirst,
'endpage': rebuildPageLast,
}
});
} else {
@ -538,59 +553,69 @@ module.exports = async (req, res, next) => {
'task': 'buildBoardMultiple',
'options': {
'board': board,
'startpage': threadPageNewest,
'endpage': afterPages,
'startpage': rebuildPageFirst,
'endpage': numPagesAfterActions,
}
});
}
} else if (req.body.sticky) { //else if -- if deleting, other actions are not executed/irrelevant
} else if (req.body.sticky) {
//rebuild current and newer pages
buildQueue.push({
'task': 'buildBoardMultiple',
'options': {
'board': board,
'startpage': 1,
'endpage': threadPageOldest,
'endpage': rebuildPageLast,
}
});
} else if (req.body.lock || req.body.bumplock || req.body.cyclic || req.body.unlink_file) {
buildQueue.push({
'task': 'buildBoardMultiple',
'options': {
'board': board,
'startpage': threadPageNewest,
'endpage': threadPageOldest,
'startpage': rebuildPageFirst,
'endpage': rebuildPageLast,
}
});
} else if (req.body.spoiler || req.body.ban || req.body.global_ban) {
buildQueue.push({
'task': 'buildBoardMultiple',
'options': {
'board': board,
'startpage': threadPageNewest,
'endpage': afterPages,
'startpage': rebuildPageFirst,
'endpage': numPagesAfterActions,
}
});
//these actions dont affect the catalog tiles if no OPs selected, so dont bother rebuilding the catalog
if (boardThreadMap[boardName].directThreads.length === 0) {
catalogRebuild = false;
//these actions dont affect the catalog tile since not on an OP and dont change reply/image counts
}
}
}
if (catalogRebuild) {
//the actions will affect the catalog, so we better rebuild it
buildQueue.push({
'task': 'buildCatalog',
'options': {
'board': board,
}
});
}
}
}
if (parallelPromises.length > 0) {
//since queue changes, this is just removing old html files
await Promise.all(parallelPromises);
}

@ -478,6 +478,7 @@ ${res.locals.numFiles > 0 ? req.files.file.map(f => f.name+'|'+(f.phash || '')).
});
}
const threadPage = data.thread ? (await Posts.getThreadPage(req.params.board, data.thread)) : 1;
const { postId, postMongoId } = await Posts.insertOne(res.locals.board, data, thread, res.locals.anonymizer);
let enableCaptcha = false; //make this returned from some function, refactor and move the next section to another file
@ -641,7 +642,6 @@ ${res.locals.numFiles > 0 ? req.files.file.map(f => f.name+'|'+(f.phash || '')).
});
} else if (data.thread) {
//refresh pages
const threadPage = await Posts.getThreadPage(req.params.board, thread);
if (data.email === 'sage' || thread.bumplocked) {
//refresh the page that the thread is on
buildQueue.push({
