This submission was migrated from a previous version of oj.uz, which used a different machine for grading. It may produce a different result if resubmitted.
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
using namespace std;
using namespace __gnu_pbds;
template<typename T> using Tree = tree<T, null_type, less<T>, rb_tree_tag, tree_order_statistics_node_update>;
typedef long long int ll;
typedef long double ld;
typedef pair<int,int> pii;
typedef pair<ll,ll> pll;
#define fastio ios_base::sync_with_stdio(false); cin.tie(NULL)
#define pb push_back
#define endl '\n'
#define sz(a) (int)a.size()
#define setbits(x) __builtin_popcountll(x)
#define ff first
#define ss second
#define conts continue
#define ceil2(x,y) (((x)+(y)-1)/(y))
#define all(a) a.begin(), a.end()
#define rall(a) a.rbegin(), a.rend()
#define yes cout << "Yes" << endl
#define no cout << "No" << endl
#define rep(i,n) for(int i = 0; i < n; ++i)
#define rep1(i,n) for(int i = 1; i <= n; ++i)
#define rev(i,s,e) for(int i = s; i >= e; --i)
#define trav(i,a) for(auto &i : a)
template<typename T>
void amin(T &a, T b) {
a = min(a,b);
}
template<typename T>
void amax(T &a, T b) {
a = max(a,b);
}
#ifdef LOCAL
#include "debug.h"
#else
#define debug(x) 42
#endif
/*
refs:
https://github.com/dolphingarlic/CompetitiveProgramming/blob/master/APIO/APIO%2019-bridges.cpp

low constraints hint at sqrt decomposition, but on what?
=> sqrt decomposition on the queries

split the queries into blocks of size B (around sqrt(q)) and process the blocks left to right
each block contains at most B updates/queries, so at most B distinct edges are updated inside it
split the edges into 2 groups: edges updated somewhere in the block, and edges the block never touches

within a block, sweep the weights in decreasing order:
- untouched edges with weight >= w are merged into a DSU as the sweep reaches them
- when a query (u, w) is reached, handle the updated edges separately (there are few of them):
  find each updated edge's value at the time of the query and merge it into the DSU if that value >= w
- the answer for the query is the size of u's connected component in the DSU
- then roll the temporarily merged updated edges back out of the DSU
the DSU must support rollback, so it uses union by rank with no path compression

per block, at most m untouched edges are merged into the DSU,
and each query temporarily merges at most B updated edges

time complexity:
O((q + m) * sqrt(q) * log(n))
(about q/B blocks, each doing O(m) permanent merges and O(B^2) temporary merges,
with every find/merge costing O(log n) since there is no path compression)

runtime can be improved by tuning the block size instead of using exactly sqrt(q);
B = 605 seems to be a good choice (found by experimentation)
*/
const int MOD = 1e9 + 7;
const int N = 1e5 + 5;
const int inf1 = int(1e9) + 5;
const ll inf2 = ll(1e18) + 5;
const int B = 605;
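// DSU with union by rank and no path compression, so merges can be undone:
// merge() saves the previous state of both roots in `history`, and rollback()
// restores everything recorded since history was last cleared.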
struct DSU {
vector<int> par, rankk, siz;
vector<array<int,4>> history;
DSU() {
}
DSU(int n) {
init(n);
}
void init(int n) {
par = vector<int>(n + 1);
rankk = vector<int>(n + 1);
siz = vector<int>(n + 1);
rep(i, n + 1) create(i);
}
void reset(int n){
rep(i,n+1) create(i);
history.clear();
}
void create(int u) {
par[u] = u;
rankk[u] = 0;
siz[u] = 1;
}
int find(int u) {
if (u == par[u]) return u;
else return find(par[u]);
}
bool same(int u, int v) {
return find(u) == find(v);
}
void merge(int u, int v) {
u = find(u), v = find(v);
if (u == v) return;
history.pb({u,par[u],rankk[u],siz[u]});
history.pb({v,par[v],rankk[v],siz[v]});
if (rankk[u] == rankk[v]) rankk[u]++;
if (rankk[u] < rankk[v]) swap(u, v);
par[v] = u;
siz[u] += siz[v];
}
void rollback(){
while(!history.empty()){
auto [u,paru,rankku,sizu] = history.back();
history.pop_back();
par[u] = paru;
rankk[u] = rankku;
siz[u] = sizu;
}
}
};
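// Query format: {1, id, w} -> change the weight of edge `id` to w
//               {2, u, w}  -> report how many vertices u can reach using only edges of weight >= w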
void solve(int test_case)
{
int n,m; cin >> n >> m;
vector<array<int,3>> edges(m+5);
rep1(i,m){
int u,v,w; cin >> u >> v >> w;
edges[i] = {u,v,w};
}
int q; cin >> q;
vector<array<int,3>> queries(q);
rep(i,q) rep(j,3) cin >> queries[i][j];
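// st holds {weight, type, id} entries: type 2 = an edge at its current weight,
// type 1 = an answer query (inserted only while its block is being processed).
// A reverse scan of st visits entries in decreasing weight, and ties put edges
// before queries, so every edge with weight >= w is seen before query (u,w).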
set<array<int,3>> st;
rep1(i,m){
array<int,3> ar = {edges[i][2],2,i};
st.insert(ar);
}
vector<int> ans(q,-1);
DSU dsu(n);
vector<bool> came(m+5);
rep(block,ceil2(q,B)){
dsu.reset(n);
vector<array<int,3>> rem_edges;
vector<array<int,3>> changed_edges;
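// First pass over the block: pull every edge touched by an update out of st
// (remembering its pre-block entry in rem_edges) and log the update in
// changed_edges; insert the block's answer queries into st by weight.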
for(int i = block*B; i < min((block+1)*B,q); ++i){
if(queries[i][0] == 1){
int id = queries[i][1];
changed_edges.pb({i,id,queries[i][2]});
array<int,3> ar = {edges[id][2],2,id};
if(st.count(ar)){
rem_edges.pb(ar);
st.erase(ar);
}
}
else{
auto [t,u,w] = queries[i];
array<int,3> ar = {w,1,i};
st.insert(ar);
}
}
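// Order changed_edges so the newest update comes first, then append each
// touched edge's pre-block weight with query index block*B-1 as a fallback;
// scanning the list forwards then yields an edge's value at any point in the block.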
reverse(all(changed_edges));
trav(ar,rem_edges){
changed_edges.pb({block*B-1,ar[2],edges[ar[2]][2]});
}
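// Sweep st in decreasing weight: untouched edges are merged permanently for
// this block; each answer query temporarily merges the touched edges whose
// value at that moment is >= w, reads u's component size, then rolls back.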
for(auto it = st.rbegin(); it != st.rend(); ++it){
auto [w,t,id] = *it;
if(t == 2){
auto [u,v,ww] = edges[id];
dsu.merge(u,v);
}
else{
dsu.history.clear();
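// Only merges recorded after this clear are undone by rollback(),
// so the permanent merges of untouched edges are kept.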
for(auto [query_id,edge_id,edge_w] : changed_edges){
if(query_id > id) conts;
if(came[edge_id]) conts;
came[edge_id] = 1;
if(edge_w >= w){
auto [u,v,ww] = edges[edge_id];
dsu.merge(u,v);
}
}
for(auto [query_id,edge_id,edge_w] : changed_edges){
came[edge_id] = 0;
}
int u = queries[id][1];
ans[id] = dsu.siz[dsu.find(u)];
dsu.rollback();
}
}
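// End of block: put the removed edge entries back into st, then apply the
// block's updates for real (update edges[] and st) and erase the block's
// answer-query entries from st.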
trav(ar,rem_edges){
st.insert(ar);
}
for(int i = block*B; i < min((block+1)*B,q); ++i){
if(queries[i][0] == 1){
int id = queries[i][1];
array<int,3> ar = {edges[id][2],2,id};
st.erase(ar);
edges[id][2] = queries[i][2];
ar[0] = queries[i][2];
st.insert(ar);
}
else{
auto [t,u,w] = queries[i];
array<int,3> ar = {w,1,i};
st.erase(ar);
}
}
}
rep(i,q){
if(ans[i] != -1){
cout << ans[i] << endl;
}
}
}
int main()
{
fastio;
int t = 1;
// cin >> t;
rep1(i, t) {
solve(i);
}
return 0;
}