Submission #438012

#: 438012
Username: fleimgruber
Problem: Keys (IOI21_keys)
Language: C++17
Result: 9 / 100
Execution time: 473 ms
Memory: 96408 KiB
// --- algolib/union-find.h ---
// union find, optionally with tags for each component. this is useful
// for example when compressing components. enable using template parameter

#include <algorithm>
#include <type_traits>
#include <numeric>
#include <vector>

template<typename T = void, typename enable = void>
class UnionFind {
    std::vector<int> p, size_;

public:
    UnionFind(int n = 0) : p(n), size_(n, 1) {
        std::iota(p.begin(), p.end(), 0);
    }

    int create() {
        int r = p.size();
        p.push_back(r);
        size_.push_back(1);
        return r;
    }

    int find(int i) {
        if (i == p[i])
            return i;
        return p[i] = find(p[i]);
    }

    int operator[](int i) { return find(i); }

    int size(int i) { return size_[find(i)]; }

    bool connected(int a, int b) { return find(a) == find(b); }

    bool connect(int a, int b) {
        a = find(a), b = find(b);
        if (a == b)
            return false;
        if (size_[a] > size_[b])
            std::swap(a, b);
        size_[b] += size_[a];
        p[a] = b;
        return true;
    }
};

template<typename T>
class UnionFind<T, typename std::enable_if_t<!std::is_same_v<T, void>>>
    : public UnionFind<void> {
    std::vector<T> tags;

public:
    UnionFind(int n = 0) : UnionFind<void>(n), tags(n) { }

    void set_tag(int i, const T& t) { tags[find(i)] = t; }

    const T& tag(int i) { return tags[find(i)]; }
};

// ----------------

#include <bits/stdc++.h>
using namespace std;

const int MAX_N = 300005;

int n;
vector<int> ans;

struct Edge {
    int to, key; // key required
    bool operator<(const Edge& o) const { return key < o.key; }
};
set<Edge> out_blocked[MAX_N];
vector<int> out_can[MAX_N];      // only the to
int vis[MAX_N], par[MAX_N];      // vis_time, parent in DAG/tree
set<int> keys[MAX_N];
std::vector<int> in_comp[MAX_N]; // for each vertex the set of vertices
                                 // it represents

// weakly is weakly connected components
UnionFind<int> compressed(MAX_N), weakly(MAX_N);

// a into b
template<typename T>
void smaller_to_larger_vec(T& a, T& b) {
    if (a.size() > b.size())
        a.swap(b);
    b.insert(b.end(), a.begin(), a.end());
    a.clear();
}

// merge i into j
void merge(int i, int j) {
    compressed.connect(i, j);
    compressed.set_tag(j, j);

    // easy ones
    smaller_to_larger_vec(in_comp[i], in_comp[j]);
    smaller_to_larger_vec(out_can[i], out_can[j]);

    // aiming for subtask 4, where key sets are small
    // smaller-to-larger for keys + blocked edges

    // merge blocked edges from smaller to larger, and promote to can
    if (out_blocked[i].size() > out_blocked[j].size()) {
        out_blocked[i].swap(out_blocked[j]);
        keys[i].swap(keys[j]);
    }
    for (auto x : out_blocked[i])
        if (keys[j].find(x.key) == keys[j].end())
            // still blocked
            out_blocked[j].insert(x);
        else
            // can!
            out_can[j].push_back(x.to);
    out_blocked[i].clear();
    // blocked edges merged!

    // now merge keys into j
    // ideally we'd like to use another smaller to larger here TODO
    // this promotes edges originally in j, but as each edge is only
    // promoted once, so amortised complexity is fast
    for (int x : keys[i])
        if (keys[j].insert(x).second) {
            // no duplicate => promote
            // to doesn't matter
            auto it = out_blocked[j].lower_bound({ -1, x });
            while (it != out_blocked[j].end() && it->key == x) {
                out_can[j].push_back(it->to);
                it = out_blocked[j].erase(it);
            }
        }
    keys[i].clear();
    // merged all keys
}

void solve() {
    bool trivial = false;
    for (int i = 0; i < n; i++)
        if (out_can[i].empty()) {
            trivial = true;
            ans[i] = 1;
        } else {
            // just take any edge
            int to = out_can[i].back();
            out_can[i].pop_back();
            par[i] = to;
            weakly.connect(i, to);
        }
    if (trivial)
        return;

    // otherwise each weakly connected component is a tree
    // with one extra edge. compress corresponding cycle to get a
    // directed forest

    // tags are the roots (where keys & edges are stored)
    for (int i = 0; i < n; i++) {
        compressed.set_tag(i, i);
        in_comp[i].push_back(i);
    }

    // roots, ordered by size
    struct PQKey {
        int vertex, size;
        bool operator<(const PQKey& o) const { return size > o.size; }
    };
    priority_queue<PQKey> roots;
    for (int i = 0; i < n; i++)
        if (vis[i] == 0) {
            int t = i + 1; // 1-based vis times
            int j = i;
            do {
                vis[j] = t;
                j = par[j];
            } while (vis[j] == 0);
            if (vis[j] == t) {
                // cycle!
                int start = j;
                for (j = par[j]; j != start; j = par[j])
                    merge(j, start);
                par[start] = -1;
                roots.push({ start, compressed.size(start) });
            }
        }

    // != -1 means we already found an answer of size done_size
    // so stop pushing to pq, but collect remaining components
    int done_size = -1;
    while (!roots.empty()) {
        auto [r, _] = roots.top();
        roots.pop();

        // find any outgoing edge
        bool has_outgoing = false;
        while (!out_can[r].empty()) {
            int to = out_can[r].back();
            out_can[r].pop_back();
            if (compressed.connected(r, to))
                continue;
            has_outgoing = true;
            if (done_size != -1)
                // we're larger than the answer
                break;
            if (weakly.connected(r, to)) {
                // merge with self
                // walk up parent chain, until we're in root comp
                int j = compressed.tag(to);
                do {
                    int jp = compressed.tag(par[j]);
                    merge(j, jp);
                    j = jp;
                } while (par[j] != -1);
                // we're still a root, but of larger size
                roots.push({ r, compressed.size(r) });
            } else {
                // other component
                // merge with other component
                par[r] = to;
                weakly.connect(r, to);
                // we're no root any longer (no pq push)
            }
            break;
        }
        if (!has_outgoing && (done_size == -1 || done_size == compressed.size(r))) {
            done_size = compressed.size(r);
            for (int x : in_comp[r])
                ans[x] = 1;
        }
    }
}

vector<int> find_reachable(vector<int> r, vector<int> u, vector<int> v, vector<int> c) {
    n = r.size();
    for (int i = 0; i < n; i++)
        keys[i].insert(r[i]);
    int m = u.size();
    for (int i = 0; i < m; i++)
        for (int x = 0; x < 2; x++) {
            if (r[u[i]] == c[i])
                out_can[u[i]].push_back(v[i]);
            else
                out_blocked[u[i]].insert({ v[i], c[i] });
            swap(u[i], v[i]);
        }
    ans.assign(n, 0);
    solve();
    return ans;
}
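The submission implements only find_reachable, so running it locally requires a grader. The following is a minimal harness sketch, not part of the submission: the file name, the tiny test instance, and the expected output are illustrative assumptions, chosen so the answer can be checked by hand (rooms 0 and 1 each reach 2 rooms, room 2 reaches all 3, so the expected output is 1 1 0). Compile it together with the submission as a second translation unit, e.g. g++ -std=c++17 grader_sketch.cpp keys.cpp.

// grader_sketch.cpp -- hypothetical local harness, not part of the submission
#include <iostream>
#include <vector>

// provided by the submission above
std::vector<int> find_reachable(std::vector<int> r, std::vector<int> u,
                                std::vector<int> v, std::vector<int> c);

int main() {
    // 3 rooms with keys 0, 0, 1; connector 0-1 needs key 0, connector 1-2 needs key 1.
    // Rooms 0 and 1 reach 2 rooms each, room 2 reaches all 3 -> expected output: 1 1 0
    std::vector<int> r = {0, 0, 1};
    std::vector<int> u = {0, 1};
    std::vector<int> v = {1, 2};
    std::vector<int> c = {0, 1};
    for (int x : find_reachable(r, u, v, c))
        std::cout << x << ' ';
    std::cout << '\n';
    return 0;
}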
Subtask results (Verdict, Execution time, Memory, Grader output): not loaded at capture time ("Fetching results...").