// Submission #1369925
//
// # | Time | Username  | Problem                   | Language | Result   | Execution time | Memory
//   |      | retr0foxx | Catfish Farm (IOI22_fish) | C++20    | 52 / 100 | 450 ms         | 287132 KiB
#include "fish.h"

#include <iostream>
#include <vector>
#include <algorithm>

#define int long long
#define MAXN 3005
#define MAXM (MAXN*MAXN)

/*
this is a nice dp problem
so here's the thing: we only care about the previous two columns when deciding the current column

suppose the current column i has height j, then we are continuing column i-1:
- if column i-1 has height less than or equal to j
  - we must also consider the height of column i-2, as it can now be greater than the height of column i-1;
    to do this we use a separate type of dp
- if column i-1 has height greater than or equal to j, we already have all the information we need


there are two types of dp that can represent a column:
- a "normal" DP
  this stores the usual "what is the highest we can achieve if we only have columns up to i, and at i we build height j"
- a "continuation" DP
  this stores something stranger: "what is the highest we can achieve if we only have columns up to i, and at i we build some k <= j, BUT at i-1 we build height j"
huh, wait -- isn't the second DP then just a DP that's tied to i-2?

"what is the highest we can achieve, if we have columns up to i+1, column i has height j, and at i+1 we have some height lower than j"
wait, but that's... redundant?
ok, never mind -- the continuation dp is actually unnecessary

the conclusion is this:
at index i, if it continues i-1, and i-1 is already higher than i, then we have all the information we need
however if i-1 is lower than i, then it is never the case that i-2 is higher than i
*/

// Problem size: N = pond side length, M = number of catfish (set in max_weights).
int N, M;
// Input copies: catfish k sits at column X[k], row Y[k] and weighs W[k].
// ('signed' means plain int here, since 'int' is macro'd to long long above.)
std::vector<signed> X, Y, W;

// Per-column scratch, rebuilt for every column i of the DP sweep:
//   helper_one[l]  - prefix max over l' <= l of dpls[i-1][l'] - WA[i-1][l']
//   helper_two[l]  - suffix max over l' >= l of dp[i-1][l'] + WA[i][l']
//   helper_three   - max over all l of dp[i-2][l]
int helper_one[MAXN], helper_two[MAXN], helper_three;
// dp[i][j]: the "normal" DP from the header comment -- best total considering
// columns 0..i with a pier of height j in column i.
int dp[MAXN][MAXN];
// dpls[i][j]: the "continuation" DP -- dp[i][j] restricted to the options fed
// by dpm and helper_one (the taller-previous-pier option is excluded; see the
// transition loop in max_weights).
int dpls[MAXN][MAXN];
int dpmm[MAXN][MAXN]; // written for column 0 only and never read again -- NOTE(review): dead state?
int dpm; // running maximum over "finished" prefixes; returned as the answer
int dpt[MAXN]; // NOTE(review): appears unused in this file
int WA[MAXN][MAXN]; // WA[i][j]: prefix sum of catfish weights in column i, rows 0..j
// NOTE(review): each MAXN x MAXN long long array is ~72 MB; four of them is
// ~290 MB, which matches the reported 287132 KiB -- likely why larger
// subtasks fail (52/100).

int pfr(int *arr, int l, int r) { return arr[r] - (l == 0 ? 0 : arr[l-1]); }

// Fold row dpi of dp into the global running best dpm.  When a column
// dpi+1 exists, each height additionally credits WA[dpi+1][h] (the fish in
// that next column up to the pier height).
void contribute(int dpi)
{
	const bool has_next = dpi + 1 < N;
	for (int h = 0; h < N; ++h)
	{
		int cand = dp[dpi][h];
		if (has_next)
			cand += WA[dpi+1][h];
		if (cand > dpm)
			dpm = cand;
	}
}

long long max_weights(signed _N, signed _M, std::vector<signed> _X, std::vector<signed> _Y,
                      std::vector<signed> _W) {

	// Stash the inputs in globals: N = pond side, M = catfish count,
	// catfish k at column X[k], row Y[k], weight W[k].
	N = _N; M = _M;
	X = std::move(_X);
	Y = std::move(_Y);
	W = std::move(_W);
	
	// Build per-column prefix sums: afterwards WA[i][j] is the total weight of
	// catfish in column i at rows 0..j.
	for (int i = 0; i < M; ++i)
		WA[X[i]][Y[i]] = W[i];
	for (int i = 0; i < N; ++i)
		for (int j = 1; j < N; ++j)
			WA[i][j] += WA[i][j-1];

	// Base case, column 0: nothing caught yet for any pier height.
	// dpmm is filled here but never read again -- NOTE(review): leftover state?
	for (int i = 0; i < N; ++i)
	{
		dp[0][i] = 0;//N > 1 ? WA[1][i] : 0;
		dpm = std::max(dpm, dp[0][i]);
		dpmm[0][i] = std::max(i > 0 ? dpmm[0][i-1] : 0, dp[0][i]);
	}
	
	// Main sweep over columns.  dp/dpls are the "normal"/"continuation" DPs
	// from the header comment; the helper arrays replace the O(N) transition
	// scans (kept below, commented out) with O(1) lookups, for O(N^2) total.
	for (int i = 1; i < N; ++i)
	{
		//printf("%i: ", i);
		
		helper_three = 0;
		for (int l = 0; l < N; ++l)
			helper_one[l] = helper_two[l] = 0;
				
		// helper_one[l]: prefix max over l' <= l of dpls[i-1][l'] - WA[i-1][l']
		// (continue a shorter previous pier; its already-credited fish are
		// subtracted so they are not counted twice when WA[i-1][j] is added).
		for (int l = 0; l < N; ++l)
		{
			helper_one[l] = std::max(l > 0 ? helper_one[l-1] : 0, dpls[i-1][l] - WA[i-1][l]);
		}
		// helper_two[l]: suffix max over l' >= l of dp[i-1][l'] + WA[i][l']
		// (previous pier is taller, so it catches column i's fish up to l').
		for (int l = N-1; l >= 0; --l)
		{
			helper_two[l] = std::max(l < N-1 ? helper_two[l+1] : 0, dp[i-1][l] + WA[i][l]);
		}
		// helper_three: best value two columns back, any height.
		if (i-2 >= 0)
		{
			for (int l = 0; l < N; ++l)
				helper_three = std::max(helper_three, dp[i-2][l]);
		}
		for (int j = 0; j < N; ++j)
		{
			// Option 1: start from the best finished prefix; a height-j pier
			// here lets column i-1's fish up to row j be caught.
			dp[i][j] = dpm + WA[i-1][j];
			dpls[i][j] = dp[i][j];
			
			// Option 2: continue a shorter pier in column i-1.
			dp[i][j] = std::max(dp[i][j], helper_one[j] + WA[i-1][j]);
			// The RHS reads dp[i][j] rather than dpls[i][j]; at this point
			// dp[i][j] == max(dpm, helper_one[j]) + WA[i-1][j], so this is
			// exactly the intended dpls value (options 1 and 2 only).
			dpls[i][j] = std::max(dp[i][j], helper_one[j] + WA[i-1][j]);
			// Option 3: taller pier in column i-1 (subtract WA[i][j], the part
			// of helper_two's credit that a height-j pier here would block).
			dp[i][j] = std::max(dp[i][j], helper_two[j] - WA[i][j]);
			// Option 4: jump from any height two columns back.
			if (i-2 >= 0)
			{
				dp[i][j] = std::max(dp[i][j], helper_three + WA[i-1][j]);
			}
			/*
			for (int l = 0; l < N; ++l)
			{
				if (l <= j)
				{
					dp[i][j] = std::max(dp[i][j], dpls[i-1][l] + WA[i-1][j] - WA[i-1][l]);
					dpls[i][j] = std::max(dpls[i][j], dpls[i-1][l] + WA[i-1][j] - WA[i-1][l]);
				}
				else
					dp[i][j] = std::max(dp[i][j], dp[i-1][l] + WA[i][l] - WA[i][j]);
				
				if (i-2 >= 0)
				{
					// easily optimizable
					dp[i][j] = std::max(dp[i][j], dp[i-2][l] + WA[i-1][j]);
				}
			}*/
			//printf("%i(%i,%i,%i) ", dp[i][j], dpls[i][j], dpm, WA[i-1][j]);
		}
		//printf("\n");
		// Fold column i-2 into dpm only once it can no longer interact with
		// the column currently being built.
		if (i >= 2)
			contribute(i-2);
	}
	// Flush the last two columns into the running answer.
	// NOTE(review): if N == 1 this calls contribute(-1), indexing dp[-1][..]
	// -- confirm the grader guarantees N >= 2.
	contribute(N-1);
	contribute(N-2);
	return dpm;
}
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...
// # | Result | Execution time | Memory | Grader output
// Fetching results...