/**
* IOI 2015: Teams
 *
 * Solution Approach:
* 1. Data Structure: Persistent Segment Tree.
* - We build a persistent segment tree where the i-th version (root[i]) contains information
* about all students with A[student] <= i.
* - The segment tree is built over the range of B values [1, N].
* - root[i] stores the counts of students at each B coordinate.
* - This allows us to calculate count(a, b) = number of students with A <= a and B >= b
* using query(root[a], b, N) in O(log N).
 *
 * 2. Logic:
* - For each query with M projects, we first sort the required team sizes K.
* - We need to verify if we can satisfy the demands. A known greedy strategy is to assign
* students to the smallest possible team size they fit into (specifically, smallest K
* requirement, using students with smallest valid B).
* - To optimize this, we use a DP approach verified by a stack (lower envelope convex hull).
* - Let dp[i] be the "surplus" or "slack" capacity after satisfying demands for groups 1...i.
* Specifically, we check the condition derived from Hall's Marriage Theorem adapted for this interval graph.
* - The recurrence relation is:
* dp[i] = min_{j < i} (dp[j] + count(K[i], K[i]) - count(K[j], K[i])) - K[i]
* Which simplifies to minimizing: dp[j] - count(K[j], K[i]).
* Let f_j(y) = dp[j] - count(K[j], y). We want min_{j < i} (f_j(K[i])) + count(K[i], K[i]) - K[i] >= 0.
 *
 * 3. Optimization:
* - The function f_j(y) represents a curve. As we process queries with increasing y (K values),
* we maintain the lower envelope of these curves using a stack.
* - Since the "slope" behavior allows intersection points to be found via binary search,
* we can maintain the optimal candidate j for the current y on the stack.
*/
#include <vector>
#include <algorithm>
#include <iostream>
using namespace std;
// --- Persistent Segment Tree (node pool + version roots) ---
const int MAXN = 500005; // Upper bound on N (number of students; B values lie in [1, N])
const int MAX_NODES = MAXN * 22; // N log N roughly — one root-to-leaf path copy per insertion
struct Node {
int l, r; // Indices of the left and right children within tree[]
int sum; // Number of students whose B-value falls inside this node's range
} tree[MAX_NODES];
int roots[MAXN]; // roots[i] = root of the version holding all students with A <= i
int node_cnt = 0; // Bump-allocation cursor into tree[]; reset in init()
// Construct an empty segment tree over the value range [tl, tr].
// Every node starts with sum == 0; returns the index of the created root.
int build(int tl, int tr) {
    int cur = ++node_cnt;
    tree[cur].sum = 0;
    if (tl < tr) {
        int mid = tl + (tr - tl) / 2;
        tree[cur].l = build(tl, mid);
        tree[cur].r = build(mid + 1, tr);
    }
    return cur;
}
// Persistent point update: derive a new version from 'prev_node' in which
// one additional student with B-value 'pos' is counted. Only the nodes on
// the root-to-leaf path are copied; all other subtrees are shared with the
// previous version. Returns the root index of the new version.
int update(int prev_node, int tl, int tr, int pos) {
    int cur = ++node_cnt;
    tree[cur] = tree[prev_node]; // path copy: start from the old node's state
    ++tree[cur].sum;
    if (tl == tr) return cur;
    int mid = (tl + tr) / 2;
    if (pos <= mid) {
        tree[cur].l = update(tree[prev_node].l, tl, mid, pos);
    } else {
        tree[cur].r = update(tree[prev_node].r, mid + 1, tr, pos);
    }
    return cur;
}
// Range-sum query on one tree version: number of students whose B-value
// lies in [ql, qr]. The query range is clipped to exact node boundaries,
// so recursion stops as soon as [ql, qr] matches the node's range.
int query(int node, int tl, int tr, int ql, int qr) {
    if (ql > qr) return 0;
    if (tl == ql && tr == qr) return tree[node].sum;
    int mid = (tl + tr) / 2;
    // Descend into a single child when the query lies entirely on one side.
    if (qr <= mid) return query(tree[node].l, tl, mid, ql, qr);
    if (mid < ql) return query(tree[node].r, mid + 1, tr, ql, qr);
    // Straddling query: split at mid and combine both halves.
    int left_part = query(tree[node].l, tl, mid, ql, mid);
    int right_part = query(tree[node].r, mid + 1, tr, mid + 1, qr);
    return left_part + right_part;
}
int N_global; // Problem size N; all segment trees span B-values [1, N_global]
// Count students with A <= a_lim and B >= b_lim, by querying the B-range
// [b_lim, N_global] in tree version 'a_lim' (which holds exactly the
// students whose A-value is at most a_lim).
int count_students(int a_lim, int b_lim) {
    return (b_lim > N_global) ? 0 : query(roots[a_lim], 1, N_global, b_lim, N_global);
}
void init(int N, int A[], int B[]) {
N_global = N;
node_cnt = 0;
// Group students by A[i] to build persistent tree versions
vector<vector<int>> by_A(N + 1);
for (int i = 0; i < N; i++) {
by_A[A[i]].push_back(B[i]);
}
roots[0] = build(1, N);
for (int i = 1; i <= N; i++) {
roots[i] = roots[i-1];
for (int b_val : by_A[i]) {
roots[i] = update(roots[i], 1, N, b_val);
}
}
}
// One piece of the lower envelope maintained by can(): the curve
// f_idx(y) = dp[idx] - count(K[idx], y) together with the upper end of the
// y-range on which it is the minimum.
struct StackItem {
int idx; // Index j into the sorted K array whose curve sits on the envelope here
int limit_y; // f_idx is optimal for query points y up to this cutoff; popped once y exceeds it
};
// Decide one query: can M projects with required team sizes K[0..M-1] all
// be staffed? Returns 1 if feasible, 0 otherwise.
//
// Approach: sort the sizes (1-based, with sentinel K[0] = 0). dp[i] is the
// slack remaining after serving groups 1..i, where group i draws only from
// students with A <= K[i] and B >= K[i]; dp[i] < 0 violates the Hall-style
// feasibility condition and we answer 0. The minimizing predecessor j for
//   dp[i] = min_j (dp[j] - count(K[j], K[i])) + count(K[i], K[i]) - K[i]
// is maintained with a monotone stack holding the lower envelope of the
// curves f_j(y) = dp[j] - count(K[j], y); envelope breakpoints are located
// by binary search over y (each count is one persistent-tree query).
int can(int M, int K[]) {
    // 1-based sorted copy of K with sentinel k_sorted[0] = 0.
    vector<int> k_sorted(M + 1);
    k_sorted[0] = 0;
    for (int i = 0; i < M; ++i) k_sorted[i + 1] = K[i];
    sort(k_sorted.begin() + 1, k_sorted.end());
    // dp[i]: surplus capacity after satisfying groups 1..i.
    vector<long long> dp(M + 1);
    dp[0] = 0;
    // Lower-envelope stack. Top = newest index, optimal for the SMALLEST
    // query values y (a newer curve rises faster in y, so it wins early and
    // loses late); an entry is the minimum while y <= its limit_y.
    vector<StackItem> st;
    st.push_back({0, N_global + 1}); // index 0 initially covers every y
    for (int i = 1; i <= M; i++) {
        int k_curr = k_sorted[i];
        // --- Query phase: find the optimal predecessor j for y = k_curr.
        // k_curr is non-decreasing across iterations, so envelope pieces
        // whose validity ends before k_curr can be discarded for good.
        while (st.size() > 1 && st.back().limit_y < k_curr) {
            st.pop_back();
        }
        int j = st.back().idx;
        // dp[i] = dp[j] + count(K[i], K[i]) - count(K[j], K[i]) - K[i]:
        // the capacity of the A-band (K[j], K[i]] with B >= K[i], relative
        // to the bottleneck carried over from group j.
        long long quantity = count_students(k_curr, k_curr) - count_students(k_sorted[j], k_curr);
        dp[i] = dp[j] + quantity - k_curr;
        if (dp[i] < 0) return 0;
        // --- Update phase: insert curve f_i into the envelope.
        // f_i rises faster than every older curve, so it is minimal on a
        // prefix [k_curr, split-1] and the old top resumes at 'split'. The
        // split is the first y where
        //   count(K[old] < A <= K[i], B >= y) < dp[i] - dp[old];
        // the RHS is constant and the LHS is non-increasing in y, so the
        // split can be binary-searched.
        while (true) {
            int old_idx = st.back().idx;
            long long cur_diff = dp[i] - dp[old_idx];
            // Find the smallest y in [k_curr+1, N+1] with count_diff(y) < cur_diff.
            int low = k_curr + 1;
            int high = N_global + 1;
            int ans = N_global + 1;
            while (low <= high) {
                int mid = (low + high) / 2;
                int c_diff = count_students(k_curr, mid) - count_students(k_sorted[old_idx], mid);
                if (c_diff < cur_diff) {
                    ans = mid;
                    high = mid - 1;
                } else {
                    low = mid + 1;
                }
            }
            // 'ans' is the split point: i is optimal on [k_curr, ans-1], the
            // old top from 'ans' onward. If that split lies at or beyond the
            // old top's own limit, the old entry never becomes optimal again
            // — drop it and compare against the next-deeper entry.
            if (ans >= st.back().limit_y) {
                st.pop_back();
                if (st.empty()) {
                    // Defensive: the base entry (idx 0, limit N+1) should
                    // never be popped here, but keep the stack non-empty.
                    st.push_back({i, N_global + 1});
                    break;
                }
            } else {
                st.push_back({i, ans}); // new piece valid until 'old' takes over at ans
                break;
            }
        }
    }
    return 1;
}
// NOTE: The text below this function in the scraped original was residue
// from an online judge's results page (four placeholder tables reading
// "# | Verdict | Execution time | Memory | Grader output" /
// "Fetching results..."). It is not part of the program and is preserved
// here only as this comment so the file remains valid C++.