#pragma GCC optimize("O3,unroll-loops")
#include <bits/stdc++.h>
using namespace std;
/**
* PROBLEM: Team Coding (EGOI 2024)
* STRATEGY: Square Root Decomposition on language frequency.
* - For rare languages: Iterate only over nodes of that language.
* - For frequent languages: Run a full DFS to pre-calculate subtree capacities.
*/
const int THRESHOLD = 330; // Approximately sqrt(N): languages with fewer occurrences are "light"
const int INF = 1e9; // NOTE(review): declared but never used anywhere in this file
vector<vector<int>> adj; // adj[u] = children of node u (tree rooted at 0, built from parent input)
vector<int> lang; // lang[u] = language id of node u (read from input)
vector<int> depth; // depth[u] = distance from root (root has depth 0), set by pre_dfs
vector<int> tin, tout; // Euler-tour interval: subtree(u) = all nodes with tin in [tin[u], tout[u]]
vector<vector<int>> nodes_by_depth_tin; // per depth d: tin values of all nodes at depth d, sorted (pre-order assignment keeps them sorted)
int timer = 0; // global tin counter advanced by pre_dfs
// DFS to build Euler Tour and depth information
void pre_dfs(int u, int d) {
depth[u] = d;
tin[u] = ++timer;
nodes_by_depth_tin[d].push_back(tin[u]);
for (int v : adj[u]) {
pre_dfs(v, d + 1);
}
tout[u] = timer;
}
// Counts the nodes of u's subtree lying at absolute depth `target_depth`.
//
// nodes_by_depth_tin[d] holds the tin values of all nodes at depth d in
// sorted order (tin is assigned in pre-order), and subtree(u) is exactly the
// tin range [tin[u], tout[u]], so the answer is the number of bucket entries
// inside that closed interval — two binary searches, O(log n).
//
// @param u             subtree root
// @param target_depth  absolute depth to count at; out-of-range depths yield 0
// @return number of subtree nodes at that depth
int count_in_subtree(int u, int target_depth) {
    // Explicit cast avoids the signed/unsigned comparison the original relied
    // on (it was safe only because target_depth < 0 was rejected first).
    if (target_depth < 0 || target_depth >= (int)nodes_by_depth_tin.size()) return 0;
    const auto& v = nodes_by_depth_tin[target_depth];
    // Count of sorted values in [tin[u], tout[u]].
    return upper_bound(v.begin(), v.end(), tout[u]) - lower_bound(v.begin(), v.end(), tin[u]);
}
// Entry point: reads the tree and languages, then for every language evaluates
// candidate leaders and prints "max_p min_s" — the largest achievable team
// size p and, among choices attaining it, the fewest swaps s.
int main() {
ios::sync_with_stdio(false);
cin.tie(nullptr);
int n, k; // n = number of nodes, k = number of languages (ids 0..k-1)
if (!(cin >> n >> k)) return 0;
lang.resize(n);
vector<vector<int>> nodes_with_lang(k); // occurrence list per language
for (int i = 0; i < n; i++) {
cin >> lang[i];
// NOTE(review): assumes 0 <= lang[i] < k; out-of-range input indexes out of bounds.
nodes_with_lang[lang[i]].push_back(i);
}
adj.assign(n, vector<int>());
// Nodes 1..n-1 each read their parent; node 0 is the root.
for (int i = 1; i < n; i++) {
int p; cin >> p;
adj[p].push_back(i);
}
tin.resize(n); tout.resize(n); depth.resize(n);
nodes_by_depth_tin.resize(n); // depths are < n, so n buckets suffice
// Build depth[], tin[], tout[] and the per-depth sorted tin lists.
// NOTE(review): pre_dfs is recursive; a deep path-shaped tree may overflow the stack.
pre_dfs(0, 0);
long long max_p = 0; // best team size found so far
long long min_s = 0; // fewest swaps achieving max_p (s = p - members already inside the subtree)
for (int l = 0; l < k; l++) {
if (nodes_with_lang[l].empty()) continue;
// --- CASE 1: LIGHT LANGUAGES (Occurrences < THRESHOLD) ---
// O(cnt^2 log n) per language: every occurrence is tried as leader.
// NOTE(review): only nodes that already speak l are tried as leaders here,
// while the heavy case below tries EVERY node at a valid depth. An
// equal-depth swap could place the leader at a same-depth position with a
// larger subtree and score higher — confirm against the editorial that
// restricting candidates to occurrence nodes is sufficient.
if (nodes_with_lang[l].size() < THRESHOLD) {
map<int, int> lang_freq_at_depth; // depth -> number of lang-l nodes at that depth
for (int node : nodes_with_lang[l]) {
lang_freq_at_depth[depth[node]]++;
}
for (int leader : nodes_with_lang[l]) {
long long current_p = 0;
int current_initial_in_subtree = 0;
// p = sum over depths d >= depth(leader) of
// min(#lang-l nodes at d, #subtree slots of leader at d):
// equal-depth swaps can move any same-depth node into the subtree.
for (auto const& [d, total_c_at_d] : lang_freq_at_depth) {
if (d < depth[leader]) continue;
int capacity = count_in_subtree(leader, d);
current_p += min(total_c_at_d, capacity);
}
// Count how many nodes of language 'l' are already in the subtree
// (linear scan over occurrences; cheap because cnt < THRESHOLD).
for (int node : nodes_with_lang[l]) {
if (tin[node] >= tin[leader] && tin[node] <= tout[leader]) {
current_initial_in_subtree++;
}
}
long long current_s = current_p - current_initial_in_subtree;
// Keep the largest p; on ties, the smallest swap count s.
if (current_p > max_p) {
max_p = current_p;
min_s = current_s;
} else if (current_p == max_p) {
if (max_p > 0) min_s = min(min_s, current_s);
}
}
}
// --- CASE 2: HEAVY LANGUAGES (Occurrences >= THRESHOLD) ---
// At most n/THRESHOLD such languages; every node is tried as leader.
else {
vector<int> global_lang_count_at_depth(n, 0); // depth -> #lang-l nodes at that depth
for (int node : nodes_with_lang[l]) {
global_lang_count_at_depth[depth[node]]++;
}
// We need to count initial language nodes in every subtree efficiently
vector<int> subtree_lang_count(n, 0); // subtree_lang_count[u] = lang-l nodes in subtree(u)
// This is done by a simple post-order accumulation
// NOTE(review): recursive lambda — same deep-tree stack-depth caveat as pre_dfs.
auto get_subtree_counts = [&](auto self, int u) -> int {
int count = (lang[u] == l ? 1 : 0);
for (int v : adj[u]) count += self(self, v);
return subtree_lang_count[u] = count;
};
get_subtree_counts(get_subtree_counts, 0);
// Check every node as a potential leader
for (int leader = 0; leader < n; leader++) {
// Leader must eventually have language 'l'
// This is only possible if at least one node at depth[leader] has language 'l'
// (an equal-depth swap can then put a lang-l speaker at the leader position).
if (global_lang_count_at_depth[depth[leader]] == 0) continue;
long long current_p = 0;
// Since it's a heavy language, we iterate through depths of the leader's subtree
// We stop when the depth exceeds the max depth of the tree or language
// NOTE(review): this scan is bounded by the subtree height, which is
// quadratic per heavy language on a path-shaped tree — confirm the
// constraints make this acceptable.
for (int d = depth[leader]; d < n; d++) {
if (global_lang_count_at_depth[d] == 0) continue;
int capacity = count_in_subtree(leader, d);
if (capacity == 0) break; // Subtree ends (subtree depths are contiguous from depth[leader])
current_p += min(global_lang_count_at_depth[d], capacity);
}
long long current_s = current_p - subtree_lang_count[leader];
// Same tie-break as the light case: max p first, then min s.
if (current_p > max_p) {
max_p = current_p;
min_s = current_s;
} else if (current_p == max_p) {
if (max_p > 0) min_s = min(min_s, current_s);
}
}
}
}
cout << max_p << " " << min_s << endl;
return 0;
}