Submission #629895

#        Username   Problem                      Language   Result     Execution time   Memory
629895   jh05013    Catfish Farm (IOI22_fish)    C++17      81 / 100   1073 ms          138756 KiB
#pragma GCC optimize("Ofast")
#include <bits/stdc++.h>
using namespace std;
void OJize(){cin.tie(NULL);ios_base::sync_with_stdio(false);}
typedef long long ll;
const int IINF = 0x3f3f3f3f;
const ll LINF = 0x3f3f3f3f3f3f3f3f;
#define sz(X) (int)((X).size())
#define entire(X) X.begin(),X.end()
template <class T1, class T2>ostream&operator<<(ostream &os,pair<T1,T2>const&x){os<<'('<<x.first<<", "<<x.second<<')';return os;}
template <class Ch, class Tr, class Container>basic_ostream<Ch,Tr>&operator<<(basic_ostream<Ch,Tr>&os,Container const&x){os<<"[ ";for(auto&y:x)os<<y<<" ";return os<<"]";}

// Coordinate compression: keeps a sorted list of keys and answers
// lower_bound / upper_bound rank queries against it.
template <typename T> struct Compress{
    int n = 0;
    vector<T> arr;
    void add(T x){arr.push_back(x); n++;}
    void init(){sort(entire(arr));}
    int lb(T x){return lower_bound(entire(arr), x) - arr.begin();}
    int ub(T x){return upper_bound(entire(arr), x) - arr.begin();}
};

// NOTE: this is not a "sparse table" related to
// binary lifting, it's literally an array which is sparse!!
// Values are stored only at explicitly written indices; init() compresses
// those indices and precomputes prefix sums plus prefix/suffix maxima,
// so prefsum / prefmax / sufmax each cost one binary search.
template<typename T> struct SparseArray{ // not actually sparse yet!!
    int n, L;
    vector<int> idxs;
    Compress<int> C;
    vector<T> val, sum, prefm, sufm;
    SparseArray(int N): n{N} {}

    map<int, T> M;
    void write(int i, T x){M[i] = x;}
    void init(){
        L = sz(M);
        if(!L) return;
        sum.resize(L), prefm.resize(L), sufm.resize(L);
        for(auto [i, x]: M) C.add(i), val.push_back(x);
        C.init();
        sum[0] = prefm[0] = val[0];
        for(int i=1; i<L; i++){
            sum[i] = sum[i-1]+val[i];
            prefm[i] = max(prefm[i-1], val[i]);
        }
        idxs = C.arr;
        sufm[L-1] = val[L-1];
        for(int i=L-2; i>=0; i--) sufm[i] = max(sufm[i+1], val[i]);
    }
    // Point read; unwritten indices behave as 0.
    T operator[](int i){
        int idx = C.lb(i);
        if(idx == L || C.arr[idx] != i) return 0;
        return val[idx];
    }
    // Sum of written values at indices <= i.
    T prefsum(int i){
        i = C.ub(i)-1;
        return i>=0? sum[i] : (T)0;
    }
    T getsum(int l, int r){return prefsum(r) - prefsum(l);}
    T allmax(){return prefm.back();}
    // Max of written values at indices <= i (resp. >= i).
    T prefmax(int i){
        i = C.ub(i)-1;
        return i>=0? prefm[i] : (T)0;
    }
    T sufmax(int i){
        i = C.lb(i);
        return i<L? sufm[i] : (T)0;
    }
};

ll max_weights(int L, int n, vector<int> X, vector<int> Y, vector<int> W){
    // grid[x] maps row -> catfish weight in column x; rows 0 and L are
    // pre-written with 0 so those indices always appear in idxs.
    vector<SparseArray<ll>> grid(L, SparseArray<ll>(L+1));
    for(int i=0; i<L; i++) grid[i].write(0, 0), grid[i].write(L, 0);
    for(int i=0; i<n; i++) grid[X[i]].write(Y[i], (ll)W[i]);
    for(auto &A: grid) A.init();

    SparseArray<ll> dinc(L+1), ddec(L+1);
    dinc.init(); ddec.init();
    // DP over columns, keyed by the previous column's pier height j,
    // split by whether the next pier height will increase or decrease:
    // column _,
    // last column height j,
    // next will increase/decrease
    for(int i=1; i<L; i++){
        SparseArray<ll> ndinc(L+1), nddec(L+1);
        // incmaxer / decmaxer turn each transition into a single
        // prefix/suffix max query instead of a scan over all heights.
        SparseArray<ll> incmaxer(L+1), decmaxer(L+1);
        for(int y0: grid[i-1].idxs){
            incmaxer.write(y0, dinc[y0] - grid[i-1].prefsum(y0-1));
            decmaxer.write(y0, ddec[y0] + grid[i].prefsum(y0-1));
        }
        incmaxer.init(), decmaxer.init();
        for(int y: grid[i].idxs){
            ll incy = 0, decy = 0;
            if(y == 0) incy = decy = ddec[0];
            if(y == L) incy = decy = max(ddec[0], dinc[L]);
            ll incmax = grid[i-1].prefsum(y-1) + incmaxer.prefmax(y-1);
            incy = max(incy, incmax);
            if(y == L) decy = max(decy, incmax);
            ll decmax = -grid[i].prefsum(y-1) + decmaxer.sufmax(y+1);
            decy = max(decy, decmax);
            ndinc.write(y, incy);
            nddec.write(y, decy);
        }
        ndinc.init(), nddec.init();
        dinc = ndinc, ddec = nddec;
    }
    return max(dinc.allmax(), ddec.allmax());
}

#ifdef jh
// Local testing harness: reads an n x n grid of weights and converts it to
// the (X, Y, W) triples expected by max_weights.
int main(){OJize();
    int n; cin>>n;
    vector<int> X, Y, W;
    for(int i=0; i<n; i++) for(int j=0; j<n; j++){
        int x; cin>>x;
        if(x){
            X.push_back(j); Y.push_back(n-1-i); W.push_back(x);
        }
    }
    cout << max_weights(n, sz(X), X, Y, W);
}
#endif
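
For context, here is a minimal usage sketch of the SparseArray helper above. It is illustrative only and not part of the submission: it assumes the Compress and SparseArray definitions are in scope, that the macro `jh` is not defined (so the local-testing main is compiled out), and the indices and values are chosen arbitrarily for demonstration.

// Minimal usage sketch for SparseArray (illustrative, not part of the
// submission). Assumes the definitions above are available in this
// translation unit and that `jh` is not defined.
#include <cassert>

int main(){
    SparseArray<long long> A(100);   // logical size 100; only written indices are stored
    A.write(3, 5);                   // example indices/values, chosen arbitrarily
    A.write(10, 2);
    A.write(40, 7);
    A.init();                        // compress indices, build prefix sums and prefix/suffix maxima

    assert(A[10] == 2);              // point read at a written index
    assert(A[11] == 0);              // unwritten indices read as 0
    assert(A.prefsum(10) == 7);      // 5 + 2 over written indices <= 10
    assert(A.prefmax(39) == 5);      // max over written indices <= 39
    assert(A.sufmax(4) == 7);        // max over written indices >= 4
    assert(A.allmax() == 7);         // global maximum over written values
    return 0;
}

In the solution itself, incmaxer and decmaxer are exactly this kind of structure, written only at the row indices that occur in the neighbouring columns, which keeps each column transition near-linear in the number of catfish rather than in the grid size.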