#include <iostream>
#include <map>
#include <tuple>
#include <utility>
#include <vector>
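// Slope trick on a tree, merging child DP functions small-to-large.
// The input shape (n internal nodes, m leaves, per-edge weights adjustable
// at cost |delta| so that all root-to-leaf distances become equal) matches
// APIO 2016 "Fireworks"; the problem name is an inference, not stated here.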
struct ConvexLinearSet {
    int m = 0;
    long long c = 0;
    std::map<long long, int> m_changes; // key: xpos, val: dm
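    // invariant: this encodes a convex piecewise-linear function f where
    // f(x) = m*x + c on the rightmost segment, and crossing a breakpoint
    // xpos from right to left lowers the gradient by m_changes[xpos]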
    int size() const {
        return (int)m_changes.size();
    }
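    // a sum of convex piecewise-linear functions is again convex:
    // gradients, intercepts and per-breakpoint deltas simply add up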
    void merge_with(const ConvexLinearSet& o) {
        m += o.m;
        c += o.c;
        for (auto [x, dm] : o.m_changes) {
            m_changes[x] += dm;
        }
    }
    std::tuple<long long, long long, int> pop_until_negative_gradient() {
        long long x = 0, x_prev = 0; // initialized: the loop may not run
        int dm = 0;
        while (!m_changes.empty() && m >= 0) {
            // pop the largest breakpoint
            x_prev = x;
            std::tie(x, dm) = *m_changes.rbegin();
            m_changes.erase(x);
            // keep f continuous at x while stepping one segment left:
            // m*x + c = (m - dm)*x + new_c -> new_c = x*dm + c
            m -= dm;
            c += x * dm;
        }
        // x is the last popped breakpoint (the leftmost minimum position);
        // x_prev is the one popped before it, i.e. the right end of the
        // minimum plateau whenever the gradient between them was 0 (m == -dm)
        return {x_prev, x, dm};
    }
    long long query_min() {
        // only the leftmost minimum position is needed here
        long long x = std::get<1>(pop_until_negative_gradient());
        return x * m + c;
    }
    void apply_w(int w) {
        auto [x_prev, x, dm] = pop_until_negative_gradient();
        // x = leftmost position attaining the minimum
        long long minval = x * m + c;
        if (m != -1) {
            m_changes[x] -= m + 1; // gradient becomes -1 on [x, x + w]
        }
        m_changes[x + w]++; // gradient becomes 0 (plateau) from x + w on
        // if the minimum was a plateau (gradient 0 before the last pop,
        // i.e. m == -dm), jump to its right end before shifting
        x = m == -dm ? x_prev : x;
        x += w;
        // gradient becomes 1 to the right of x; keep the value continuous:
        // minval = 1*x + new_c -> new_c = minval - x
        m_changes[x]++;
        m = 1;
        c = minval - x;
    }
};
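// Rough complexity, assuming small-to-large merging below: each breakpoint
// changes owner O(log(n + m)) times and each std::map operation costs
// O(log(n + m)), so the total is about O((n + m) log^2(n + m)).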
int main() {
    std::cin.tie(nullptr)->sync_with_stdio(false);
    int n, m;
    std::cin >> n >> m;
    std::vector<ConvexLinearSet> sets(n + m + 1);
    std::vector<std::vector<int>> adj(n + m + 1);
    std::vector<int> w(n + m + 1);
    w[1] = 0; // root has no parent edge
    for (int i = 2; i <= n + m; i++) {
        int par;
        std::cin >> par >> w[i];
        adj[par].push_back(i);
        if (i > n) {
            // nodes n+1 .. n+m are the leaves: dp_leaf(x) = |x - w[i]|,
            // i.e. gradient 1 and intercept -w[i] to the right of w[i],
            // with a gradient change of +2 at the breakpoint w[i]
            sets[i].m = 1;
            sets[i].c = -w[i];
            sets[i].m_changes[w[i]] = 2;
        }
    }
    // idea: dp[u][x] = apply_w(sum over children v of dp[v][x], w[u])
    //       dp[leaf][x] = |x - w[leaf]|
    // apply_w on a convex piecewise-linear function rests on two greedy facts:
    // 1. any gradient > 1 can be flattened to 1: increase the edge weight beyond w instead
    // 2. any gradient < 0 can spend the whole budget w first, so [mx, mx + w] gets gradient -1
    // so from the minimum point (mx, my) the new function goes (mx, my + w) -> (mx + w, my),
    // stays flat until (last_mx + w, my), and has gradient 1 afterwards
    // why shift right? think of every breakpoint as initially moved right by w; we then
    // cut at the minimum and apply (2) toward the left
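    // hand-traced example of apply_w, assuming f(x) = |x - 1| + |x - 3|
    // (stored as m = 2, c = -4, m_changes = {1: 2, 3: 2}) and w = 2:
    // the minimum is 2 on the plateau [1, 3]; apply_w(2) leaves the state
    // m = 1, c = -3, m_changes = {1: 1, 3: 1, 5: 1}, i.e. gradients
    // -2, -1, 0, 1 split at 1, 3, 5: the plateau shifted to [3, 5]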
    auto dfs = [&](auto&& dfs_fn, int u) -> void {
        if (adj[u].empty()) {
            return;
        }
        // small-to-large trick: keep the largest child's set in sets[u],
        // so each breakpoint changes owner at most O(log N) times overall
        for (int v : adj[u]) {
            dfs_fn(dfs_fn, v);
            if (sets[u].size() < sets[v].size()) {
                // park the smaller set back in sets[v]; it is merged below
                std::swap(sets[u], sets[v]);
            }
        }
        // merge every remaining child set into the largest one
        for (int v : adj[u]) {
            sets[u].merge_with(sets[v]);
        }
        sets[u].apply_w(w[u]);
    };
    dfs(dfs, 1);
    std::cout << sets[1].query_min() << std::endl;
    return 0;
}
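// hand-checked run, assuming the input format read above (n m, then
// "parent weight" for nodes 2..n+m):
//   1 2
//   1 3
//   1 1
// the root's merged dp is |x - 3| + |x - 1|, minimal value 2 on [1, 3];
// apply_w(0) at the root preserves it, so the program prints 2.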