#include <bits/stdc++.h>
using namespace std;
using ll = long long;
// Fenwick (binary indexed) tree supporting point updates combined with max
// and prefix-maximum queries; all indices are 1-based.
struct Fenwick {
    int N;
    vector<int> f;
    Fenwick(int _N): N(_N), f(N + 1, 0) {}
    // point update: make value v visible to every query(j) with j >= i (i must be >= 1)
    void update(int i, int v) {
        for(; i <= N; i += i & -i)
            f[i] = max(f[i], v);
    }
    // prefix query: maximum value updated at any index in [1..i], 0 if none
    int query(int i) {
        int res = 0;
        for(; i > 0; i -= i & -i)
            res = max(res, f[i]);
        return res;
    }
};
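
// A minimal usage sketch of the structure above. fenwickMaxDemo is a
// hypothetical helper that is never called; it only illustrates the
// point-update / prefix-maximum semantics the solver below relies on.
void fenwickMaxDemo() {
    Fenwick fw(4);
    fw.update(2, 5);            // 5 becomes visible to query(i) for every i >= 2
    fw.update(3, 2);
    assert(fw.query(1) == 0);   // nothing updated at index 1
    assert(fw.query(2) == 5);
    assert(fw.query(4) == 5);   // prefix maximum over [1..4]
}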
int main(){
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int N;
    cin >> N;
    vector<ll> a(N), S(N + 1, 0), allS;
    for(int i = 0; i < N; i++){
        cin >> a[i];
        S[i + 1] = S[i] + a[i];   // S[i] = a[0] + ... + a[i-1]
    }
    // Coordinate-compress the prefix sums; both queries (at S[i]) and threshold
    // keys are ranked against this sorted, deduplicated list.
    allS = S;
    sort(allS.begin(), allS.end());
    allS.erase(unique(allS.begin(), allS.end()), allS.end());
    auto getIdx = [&](ll x){
        // 1-based rank of the first element of allS that is >= x
        // (allS.size() + 1 if no such element exists)
        return int(lower_bound(allS.begin(), allS.end(), x) - allS.begin()) + 1;
    };
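    // Example with hypothetical values: for S = {0, 3, 3, 7} we get
    // allS = {0, 3, 7}, getIdx(0) = 1, getIdx(3) = 2, getIdx(7) = 3; a value
    // absent from allS maps to the rank of the next stored value, e.g.
    // getIdx(5) = 3, and getIdx(8) = 4 (one past the end, so updates there
    // are ignored by the Fenwick).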
    // dpFenw stores, at the compressed rank of a threshold key K, the dp value
    // that a later endpoint i with S[i] >= K is allowed to reach, so dp_i is a
    // single prefix-maximum query at the rank of S[i] (one Fenwick is enough;
    // no separate structure for the thresholds is needed).
    Fenwick dpFenw((int)allS.size());
    // Base case: the empty prefix (S[0] = 0) carries dp = 0; with a
    // zero-initialized Fenwick this update only documents the intent.
    dpFenw.update(getIdx(0), 0);
    int answer = 0;
    // bestS[d] = minimal prefix sum among positions that end a chain of d
    // segments; bestS[0] = 0 stands for the empty prefix.
    vector<ll> bestS(N + 2, (ll)4e18);
    bestS[0] = 0;
    for(int i = 1; i <= N; i++){
        // A segment may end at i on top of a chain ending at j only if its sum
        // is at least the last segment sum of that chain:
        //     S[i] - S[j] >= S[j] - S[k_j]    (k_j = prefix where j's last segment starts)
        // which rearranges to
        //     S[i] >= 2*S[j] - S[k_j] = T[j].
        // Every finished chain is therefore summarized by a single threshold
        // T[j]; a later endpoint i can extend it exactly when S[i] >= T[j].
        // Storing, at the compressed rank of T[j], the number of segments in
        // that chain turns "best chain extendable at i" into a prefix-maximum
        // query at the rank of S[i]. Instead of remembering k_j per position,
        // bestS[d] keeps the smallest prefix sum among positions ending a
        // d-segment chain and T is built from it. This is the standard recipe
        // for "maximum number of segments with non-decreasing sums" tasks:
        //   1) dp_i = dpFenw.query(rank of S[i])   (segments before the one ending at i)
        //   2) answer = max(answer, dp_i)
        //   3) newKey = 2*S[i] - bestS[dp_i]       (threshold for extending past i)
        //   4) bestS[dp_i + 1] = min(bestS[dp_i + 1], S[i]);
        //      dpFenw.update(rank of newKey, dp_i + 1)
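        // Worked example with hypothetical values: a = {3, 1, 2} gives
        // S = {0, 3, 4, 6}. Ending the first segment at j = 1 yields
        // T = 2*S[1] - S[0] = 6, so a second segment can only end at an i with
        // S[i] >= 6; here that is i = 3, giving segments [1..1] and [2..3]
        // with sums 3 and 3.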
        // 1) dp_i = number of segments in the best chain that a segment ending
        //    at i can extend (prefix maximum over all thresholds <= S[i]).
        int idx_i = getIdx(S[i]);
        int dp_i = dpFenw.query(idx_i);
        answer = max(answer, dp_i);
        // 2) Ending a segment here forms a chain of dp_i + 1 segments; a later
        //    endpoint may extend it only once its prefix sum reaches
        //    newKey = S[i] + (S[i] - bestS[dp_i]).
        ll newKey = 2 * S[i] - bestS[dp_i];
        // Rank newKey with the same 1-based indexing used for the queries; if it
        // exceeds every prefix sum, getIdx returns allS.size() + 1 and the update
        // below is a harmless no-op.
        int newIdx = getIdx(newKey);
        // 3) Record the smallest prefix sum that ends a (dp_i + 1)-segment chain.
        bestS[dp_i + 1] = min(bestS[dp_i + 1], S[i]);
        // 4) Make the new chain visible to every later endpoint with S >= newKey.
        dpFenw.update(newIdx, dp_i + 1);
    }
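    // Each iteration does one binary search in getIdx plus one Fenwick query and
    // one Fenwick update, so the whole loop runs in O(N log N) time and O(N) memory.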
    cout << answer << "\n";
    return 0;
}