#include <iostream>
#include <vector>
#include <unordered_map> // Use unordered_map for potentially faster average time
#include <algorithm>     // For std::min, std::max, std::swap
#include <functional>    // For std::function
using namespace std;
// ---------------------------------------------------------------------------
// Global tree structure and per-employee properties
// ---------------------------------------------------------------------------
vector<vector<int>> adj; // adj[u] = direct subordinates (children) of employee u
vector<int> languages;   // languages[u] = preferred language of employee u
vector<int> depth_arr;   // depth_arr[u] = distance from the CEO (employee 0, depth 0)
vector<int> subtree_size; // subtree_size[u] = #employees in u's subtree; picks heavy children
// ---------------------------------------------------------------------------
// Best answer found so far over all candidate team leads
// ---------------------------------------------------------------------------
int max_P = 0; // Maximum number of employees in the project team
int min_S = 0; // Minimum number of switches needed to achieve max_P
// Company-wide counts of (depth, language) pairs:
// total_depth_lang_counts[d][k] = #employees at depth d whose language is k.
// Filled once in main(); read-only afterwards.
vector<unordered_map<int, int>> total_depth_lang_counts;
// Per-depth statistics for the subtree currently held by the DSU-on-tree sweep.
// For each depth d present in the tracked subtree:
//   .first  = (language -> count) among tracked employees at depth d
//   .second = total tracked employees at depth d
unordered_map<int, pair<unordered_map<int, int>, int>> current_subtree_data;
// Running P/S sums for the node 'u' currently being evaluated as team lead,
// maintained incrementally by add_node_to_current_data():
//   P = employees in u's subtree already speaking u's language (per depth, summed)
//   S = swaps possible per depth (see add_node_to_current_data), summed
long long current_p_sum_for_u = 0; // Sum of P for the current 'u' as team lead
long long current_s_sum_for_u = 0; // Sum of S for the current 'u' as team lead
int current_u_lang = -1;           // The preferred language of the current node 'u' being processed
// Computes depth_arr[] and subtree_size[] for the subtree rooted at u.
// u: root of the traversal (the CEO, 0, in this program)
// d: depth assigned to u (0 for the CEO)
//
// Implemented iteratively: the hierarchy may degenerate into a chain of up to
// N employees, and the original recursive DFS risks overflowing the call
// stack on such inputs. Results are identical to the recursive version.
void compute_depths_dfs(int u, int d) {
    depth_arr[u] = d;
    vector<pair<int, int>> pending;   // (node, parent) still to visit
    vector<pair<int, int>> discovery; // visit order; a child always follows its parent
    pending.push_back({u, -1});
    while (!pending.empty()) {
        auto [node, parent] = pending.back();
        pending.pop_back();
        subtree_size[node] = 1; // the node itself
        discovery.push_back({node, parent});
        for (int v : adj[node]) { // subordinates sit one level deeper
            depth_arr[v] = depth_arr[node] + 1;
            pending.push_back({v, node});
        }
    }
    // Children are discovered strictly after their parent, so walking the
    // discovery order in reverse accumulates subtree sizes bottom-up.
    for (int i = (int)discovery.size() - 1; i >= 0; --i) {
        auto [node, parent] = discovery[i];
        if (parent != -1) subtree_size[parent] += subtree_size[node];
    }
}
// Adds (delta = +1) or removes (delta = -1) one employee's contribution to
// current_subtree_data while incrementally maintaining current_p_sum_for_u
// and current_s_sum_for_u for the team-lead language current_u_lang.
//
// The per-depth contribution to the sums is:
//   P(d) = #tracked employees at depth d who already speak current_u_lang
//   S(d) = min(#tracked employees at depth d with a different language,
//              #employees at depth d OUTSIDE the tracked subtree who speak
//              current_u_lang)
// Only depth d = depth_arr[node_id] changes, so we subtract that depth's old
// contribution, apply the delta, then add the new contribution back.
//
// node_id: the employee ID to add/remove
// delta: +1 for adding, -1 for removing
void add_node_to_current_data(int node_id, int delta) {
    int d = depth_arr[node_id];
    int lang = languages[node_id];
    // --- Step 1: subtract depth d's current contribution from the P/S sums ---
    // NOTE: lookups use current_u_lang (the team lead's language), NOT the
    // node's own language — P and S are defined relative to the lead.
    // (operator[] on current_subtree_data default-creates an empty entry for d
    // if absent; harmless, as the cleanup below erases empty entries.)
    int old_lang_count_at_d_in_subtree = current_subtree_data[d].first.count(current_u_lang) ? current_subtree_data[d].first.at(current_u_lang) : 0;
    int old_total_count_at_d_in_subtree = current_subtree_data[d].second;
    if (old_total_count_at_d_in_subtree != 0) { // an empty depth contributed nothing
        current_p_sum_for_u -= old_lang_count_at_d_in_subtree;
        int old_in_subtree_wrong_lang_at_d = old_total_count_at_d_in_subtree - old_lang_count_at_d_in_subtree;
        int old_global_correct_lang_at_d = total_depth_lang_counts[d].count(current_u_lang) ? total_depth_lang_counts[d].at(current_u_lang) : 0;
        int old_out_of_subtree_correct_lang_at_d = old_global_correct_lang_at_d - old_lang_count_at_d_in_subtree;
        current_s_sum_for_u -= min(old_in_subtree_wrong_lang_at_d, old_out_of_subtree_correct_lang_at_d);
    }
    // --- Step 2: apply the node's +/- contribution to the per-depth counts ---
    current_subtree_data[d].first[lang] += delta;
    current_subtree_data[d].second += delta;
    // --- Step 3: add depth d's new contribution back into the P/S sums ---
    int new_lang_count_at_d_in_subtree = current_subtree_data[d].first.count(current_u_lang) ? current_subtree_data[d].first.at(current_u_lang) : 0;
    int new_total_count_at_d_in_subtree = current_subtree_data[d].second;
    current_p_sum_for_u += new_lang_count_at_d_in_subtree;
    int new_in_subtree_wrong_lang_at_d = new_total_count_at_d_in_subtree - new_lang_count_at_d_in_subtree;
    int new_global_correct_lang_at_d = total_depth_lang_counts[d].count(current_u_lang) ? total_depth_lang_counts[d].at(current_u_lang) : 0;
    int new_out_of_subtree_correct_lang_at_d = new_global_correct_lang_at_d - new_lang_count_at_d_in_subtree;
    current_s_sum_for_u += min(new_in_subtree_wrong_lang_at_d, new_out_of_subtree_correct_lang_at_d);
    // --- Step 4: drop empty entries so map sizes stay proportional to live data ---
    // (both lookups are safe here: the entries were just touched in Step 2)
    if (current_subtree_data[d].second == 0) { // depth d is now empty: remove it entirely
        current_subtree_data.erase(d);
    } else if (current_subtree_data[d].first[lang] == 0) { // nobody at depth d speaks 'lang' anymore
        current_subtree_data[d].first.erase(lang);
    }
}
// DSU-on-trees (small-to-large) sweep. Evaluates every node 'u' as a
// candidate team lead: computes P (subtree members at each depth already
// speaking u's language) and S (per-depth swap count) over u's whole subtree,
// then updates the global best (max_P, min_S).
//
// u:      current node being processed
// parent: parent of u (kept for traversal symmetry; not used in the DSU logic)
// keep:   if true, u's subtree data stays in current_subtree_data for the
//         caller (u is a heavy child); if false it is rolled back on exit.
//
// NOTE(review): recursion depth here (and in the add/remove lambdas) is the
// tree height, so a chain-shaped hierarchy stresses the call stack — same
// caveat as the original compute_depths_dfs.
void dfs_dsu_on_tree(int u, int parent, bool keep) {
    int heavy_child = -1;
    int max_sz = -1;
    // Find the heavy child: the child with the largest subtree. Its data is
    // reused in place, which is what gives the O(n log n) total add count.
    for (int v : adj[u]) {
        if (subtree_size[v] > max_sz) {
            max_sz = subtree_size[v];
            heavy_child = v;
        }
    }
    // --- Step 1: recurse into all light children; their data is discarded
    // (keep=false) so current_subtree_data is empty of them afterwards ---
    for (int v : adj[u]) {
        if (v != heavy_child) {
            dfs_dsu_on_tree(v, u, false);
        }
    }
    // --- Step 2: recurse into the heavy child last; its data is KEPT ---
    if (heavy_child != -1) {
        dfs_dsu_on_tree(heavy_child, u, true);
    }

    // --- Step 3: current_subtree_data now holds exactly the heavy child's
    // subtree stats. Rebuild the P/S sums from scratch for u's language —
    // required because the sums left behind were computed for the heavy
    // child's language, and current_u_lang differs per node. ---
    current_u_lang = languages[u];
    current_p_sum_for_u = 0;
    current_s_sum_for_u = 0;
    for (auto const& [d, stats] : current_subtree_data) {
        int lang_count = stats.first.count(current_u_lang) ? stats.first.at(current_u_lang) : 0;
        int total_count = stats.second;
        current_p_sum_for_u += lang_count;
        int in_subtree_wrong_lang = total_count - lang_count;
        int global_correct_lang = total_depth_lang_counts[d].count(current_u_lang) ? total_depth_lang_counts[d].at(current_u_lang) : 0;
        int out_of_subtree_correct_lang = global_correct_lang - lang_count;
        // S(d): can swap at most as many wrong-language members as there are
        // correct-language employees available outside the subtree at depth d.
        current_s_sum_for_u += min(in_subtree_wrong_lang, out_of_subtree_correct_lang);
    }
    // --- Step 4: add u itself (add_node_to_current_data keeps sums in sync) ---
    add_node_to_current_data(u, 1);
    // --- Step 5: re-add every node of each light child's subtree one by one;
    // these are the "small" merges of small-to-large ---
    for (int v : adj[u]) {
        if (v != heavy_child) {
            // Recursive lambda that adds curr_node and its whole subtree.
            function<void(int)> add_subtree_nodes =
                [&](int curr_node) {
                add_node_to_current_data(curr_node, 1); // Add current node
                for (int child : adj[curr_node]) {
                    add_subtree_nodes(child); // Recurse for children
                }
            };
            add_subtree_nodes(v); // Start adding from the light child's root
        }
    }
    // --- Step 6: current_subtree_data and the P/S sums now describe u's
    // entire subtree with languages[u] as the team-lead language. ---

    // Update the global answer. The team size is taken as P + S
    // (kept members plus swapped-in replacements — presumably the swapped-in
    // outside employees join the team; verify against the problem statement).
    // NOTE(review): assigning a long long sum to int max_P narrows; fine only
    // if the team size fits in int (it does for team size <= N).
    if (current_p_sum_for_u + current_s_sum_for_u > max_P) {
        max_P = current_p_sum_for_u + current_s_sum_for_u;
        min_S = (int)current_s_sum_for_u; // Cast to int for min_S
    } else if (current_p_sum_for_u + current_s_sum_for_u == max_P) {
        min_S = min(min_S, (int)current_s_sum_for_u);
    }
    // --- Step 7: if u is a light child itself, roll back its entire subtree
    // so the parent's pass starts from its own heavy child only. ---
    if (!keep) {
        function<void(int)> remove_subtree_nodes =
            [&](int curr_node) {
            add_node_to_current_data(curr_node, -1); // delta -1 removes the node
            for (int child : adj[curr_node]) {
                remove_subtree_nodes(child);
            }
        };
        remove_subtree_nodes(u); // Start removing from 'u' itself
    }
}
// Reads the hierarchy and languages, runs the DSU-on-trees sweep from the
// CEO (employee 0), and prints "max_P min_S".
int main() {
    // Fast I/O: drop C-stdio sync and untie cin from cout.
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr); // nullptr over NULL (modern C++)
    int N, K; // N: number of employees, K: number of programming languages
    cin >> N >> K;    // K is part of the input format but unused below
    languages.resize(N);
    for (int i = 0; i < N; ++i) {
        cin >> languages[i]; // preferred language of employee i
    }
    // Build the hierarchy: employee i (1..N-1) reports to boss b, so b is
    // the parent of i. Employee 0 (the CEO) has no boss.
    adj.resize(N);
    for (int i = 1; i < N; ++i) {
        int b;
        cin >> b;            // direct boss of employee i
        adj[b].push_back(i); // add i as a child of b
    }
    // Depths and subtree sizes, rooted at the CEO (employee 0) at depth 0.
    depth_arr.resize(N);
    subtree_size.resize(N);
    compute_depths_dfs(0, 0);
    // Company-wide (depth, language) counts; depth never exceeds N-1.
    total_depth_lang_counts.resize(N);
    for (int i = 0; i < N; ++i) {
        total_depth_lang_counts[depth_arr[i]][languages[i]]++;
    }
    // Run the sweep from the root; root data is always kept (keep=true).
    dfs_dsu_on_tree(0, -1, true);
    // '\n' instead of endl: identical output without forcing a flush.
    cout << max_P << " " << min_S << '\n';
    return 0;
}
// NOTE: Trailing online-judge verdict-table placeholders ("Fetching
// results...") were removed here — they were a non-source artifact appended
// when this file was exported from the submission page, and are not valid C++.