Submission #644682

| # | Submission time | Handle | Problem | Language | Result | Execution time | Memory |
|---|---|---|---|---|---|---|---|
| 644682 | 2022-09-25T06:17:51Z | ymm | Fortune Telling 2 (JOI14_fortune_telling2) | C++17 | 0 / 100 | 1 ms | 340 KB |
```cpp
#include <bits/stdc++.h>
#define Loop(x,l,r) for (ll x = (l); x < (r); ++x)
#define LoopR(x,l,r) for (ll x = (r)-1; x >= (l); --x)
typedef long long ll;
typedef std::pair<int, int> pii;
typedef std::pair<ll , ll > pll;
using namespace std;

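// S0: cards per coordinate-compression block, so a block introduces at most
// 2*S0+1 distinct values and its compressed indices fit the 16-bit arrays.
// S1: tile width for the update kernel; one tile of sa+sb is 8 KiB, small
// enough to stay cache-resident while all k queries stream over it.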
const int N = 200'032;
const int S0 = 32767;
const int S1 = 2048;
int a[N], b[N];
short sa[N], sb[N];
int q[N];
short sq[N];
int n;

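// Update kernel: apply three flip thresholds x, y, z, in order, to sa[l..r).
// A card whose current (compressed) value is <= the threshold is flipped by
// XOR-ing it with its sb mask. Reference implementation: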
/*
__attribute__((optimize("O3,unroll-loops"),target("avx2")))
void up(short x, short y, short z, int l, int r)
{
	Loop (i,l,r) {
		short v = sa[i], u = sb[i];
		v ^= v <= x? u: 0;
		v ^= v <= y? u: 0;
		v ^= v <= z? u: 0;
		sa[i] = v;
	}
}
*/
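// Hand-pasted GCC output of the reference function above (note its
// O3/unroll-loops/AVX2 attributes), so the kernel stays vectorized even if
// the grader compiles without those flags.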
void up(short,short,short,int,int);
asm(".text\n .p2align 4\n .globl	_Z2upsssii\n .type	_Z2upsssii, @function\n _Z2upsssii:\n .myLFB9703:\n .cfi_startproc\n movl	%edx, %r11d\n movslq	%r8d, %r8\n movslq	%ecx, %rdx\n cmpq	%r8, %rdx\n jge	.myL73\n pushq	%rbp\n .cfi_def_cfa_offset 16\n .cfi_offset 6, -16\n movl	%edi, %r9d\n movq	%r8, %rdi\n movl	%esi, %r10d\n subq	%rdx, %rdi\n leaq	-1(%rdi), %rax\n movq	%rsp, %rbp\n .cfi_def_cfa_register 6\n pushq	%r15\n pushq	%r14\n .cfi_offset 15, -24\n .cfi_offset 14, -32\n movq	%rdx, %r14\n pushq	%r13\n pushq	%r12\n pushq	%rbx\n .cfi_offset 13, -40\n .cfi_offset 12, -48\n .cfi_offset 3, -56\n cmpq	$14, %rax\n jbe	.myL52\n movq	%rdi, %r15\n leaq	(%rdx,%rdx), %r12\n leaq	sa(%rip), %rsi\n xorl	%eax, %eax\n vmovd	%r9d, %xmm6\n shrq	$4, %r15\n vmovd	%r10d, %xmm5\n vmovd	%r11d, %xmm4\n salq	$5, %r15\n leaq	sb(%rip), %rbx\n leaq	(%rsi,%r12), %rcx\n leaq	-32(%r15), %r13\n addq	%rbx, %r12\n vpbroadcastw	%xmm6, %ymm6\n shrq	$5, %r13\n vpbroadcastw	%xmm5, %ymm5\n vpbroadcastw	%xmm4, %ymm4\n addq	$1, %r13\n andl	$3, %r13d\n je	.myL24\n cmpq	$1, %r13\n je	.myL63\n cmpq	$2, %r13\n je	.myL64\n vmovdqu	(%rcx), %ymm1\n vmovdqu	(%r12), %ymm0\n movl	$32, %eax\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	%ymm0, (%rcx)\n .myL64:\n vmovdqu	(%rcx,%rax), %ymm1\n vmovdqu	(%r12,%rax), %ymm0\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	%ymm0, (%rcx,%rax)\n addq	$32, %rax\n .myL63:\n vmovdqu	(%rcx,%rax), %ymm1\n vmovdqu	(%r12,%rax), %ymm0\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	%ymm0, (%rcx,%rax)\n addq	$32, %rax\n cmpq	%r15, %rax\n je	.myL69\n .myL24:\n vmovdqu	(%rcx,%rax), %ymm1\n vmovdqu	(%r12,%rax), %ymm0\n leaq	32(%rax), %r13\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	32(%rcx,%rax), %ymm1\n vmovdqu	%ymm0, (%rcx,%rax)\n vmovdqu	32(%r12,%rax), %ymm0\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	64(%rcx,%rax), %ymm1\n vmovdqu	%ymm0, 32(%rcx,%rax)\n vmovdqu	64(%r12,%rax), %ymm0\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	%ymm0, 64(%rcx,%rax)\n vmovdqu	64(%rcx,%r13), %ymm1\n leaq	
96(%r13), %rax\n vmovdqu	64(%r12,%r13), %ymm0\n vpcmpgtw	%ymm6, %ymm1, %ymm3\n vpxor	%ymm0, %ymm1, %ymm2\n vpblendvb	%ymm3, %ymm1, %ymm2, %ymm2\n vpcmpgtw	%ymm5, %ymm2, %ymm3\n vpxor	%ymm2, %ymm0, %ymm1\n vpblendvb	%ymm3, %ymm2, %ymm1, %ymm1\n vpcmpgtw	%ymm4, %ymm1, %ymm2\n vpxor	%ymm1, %ymm0, %ymm0\n vpblendvb	%ymm2, %ymm1, %ymm0, %ymm0\n vmovdqu	%ymm0, 64(%rcx,%r13)\n cmpq	%r15, %rax\n jne	.myL24\n .myL69:\n movq	%rdi, %rax\n andq	$-16, %rax\n addq	%rax, %rdx\n cmpq	%rax, %rdi\n je	.myL77\n vzeroupper\n .myL23:\n subq	%rax, %rdi\n leaq	-1(%rdi), %rcx\n cmpq	$6, %rcx\n jbe	.myL28\n vmovd	%r9d, %xmm1\n vmovd	%r10d, %xmm4\n vmovd	%r11d, %xmm2\n addq	%r14, %rax\n leaq	(%rsi,%rax,2), %rcx\n vpbroadcastw	%xmm1, %xmm1\n vmovdqu	(%rbx,%rax,2), %xmm0\n vpbroadcastw	%xmm4, %xmm4\n vmovdqu	(%rcx), %xmm5\n vpbroadcastw	%xmm2, %xmm2\n movq	%rdi, %rax\n andq	$-8, %rax\n vpcmpgtw	%xmm1, %xmm5, %xmm1\n vpxor	%xmm0, %xmm5, %xmm3\n addq	%rax, %rdx\n vpblendvb	%xmm1, %xmm5, %xmm3, %xmm3\n vpcmpgtw	%xmm4, %xmm3, %xmm4\n vpxor	%xmm3, %xmm0, %xmm1\n vpblendvb	%xmm4, %xmm3, %xmm1, %xmm1\n vpcmpgtw	%xmm2, %xmm1, %xmm2\n vpxor	%xmm1, %xmm0, %xmm0\n vpblendvb	%xmm2, %xmm1, %xmm0, %xmm0\n vmovdqu	%xmm0, (%rcx)\n cmpq	%rax, %rdi\n je	.myL71\n .myL28:\n movzwl	(%rsi,%rdx,2), %eax\n movzwl	(%rbx,%rdx,2), %ecx\n leaq	1(%rdx), %rdi\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdx,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdx,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdx,2)\n cmpq	%rdi, %r8\n jle	.myL71\n movzwl	(%rsi,%rdi,2), %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdi,2)\n leaq	2(%rdx), %rdi\n cmpq	%rdi, %r8\n jle	.myL71\n movzwl	(%rsi,%rdi,2), %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdi,2)\n leaq	3(%rdx), %rdi\n cmpq	%r8, %rdi\n jge	.myL71\n movzwl	(%rsi,%rdi,2), %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdi,2)\n leaq	4(%rdx), %rdi\n cmpq	%rdi, %r8\n jle	.myL71\n movzwl	(%rsi,%rdi,2), %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdi,2)\n leaq	5(%rdx), %rdi\n cmpq	%rdi, %r8\n jle	.myL71\n movzwl	(%rsi,%rdi,2), %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r9w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r10w, %ax\n cmovle	%ecx, %eax\n movzwl	(%rbx,%rdi,2), %ecx\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n addq	$6, %rdx\n movw	%ax, (%rsi,%rdi,2)\n cmpq	%rdx, %r8\n jle	.myL71\n movzwl	(%rsi,%rdx,2), %eax\n movzwl	(%rbx,%rdx,2), %ecx\n movl	%eax, %edi\n xorl	%ecx, %edi\n cmpw	%r9w, %ax\n cmovle	%edi, %eax\n movl	%eax, %edi\n 
xorl	%ecx, %edi\n cmpw	%r10w, %ax\n cmovle	%edi, %eax\n xorl	%eax, %ecx\n cmpw	%r11w, %ax\n cmovle	%ecx, %eax\n movw	%ax, (%rsi,%rdx,2)\n .myL71:\n popq	%rbx\n popq	%r12\n popq	%r13\n popq	%r14\n popq	%r15\n popq	%rbp\n .cfi_def_cfa 7, 8\n ret\n .p2align 4,,10\n .p2align 3\n .myL73:\n .cfi_restore 3\n .cfi_restore 6\n .cfi_restore 12\n .cfi_restore 13\n .cfi_restore 14\n .cfi_restore 15\n ret\n .myL52:\n .cfi_def_cfa 6, 16\n .cfi_offset 3, -56\n .cfi_offset 6, -16\n .cfi_offset 12, -48\n .cfi_offset 13, -40\n .cfi_offset 14, -32\n .cfi_offset 15, -24\n xorl	%eax, %eax\n leaq	sa(%rip), %rsi\n leaq	sb(%rip), %rbx\n jmp	.myL23\n .myL77:\n vzeroupper\n jmp	.myL71\n .cfi_endproc\n .myLFE9703:\n .size	_Z2upsssii, .-_Z2upsssii\n");

int main()
{
	cin.tie(0) -> sync_with_stdio(false);
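	// n cards (a[i] = number initially face up, b[i] = number on the back)
	// and k flip queries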
	int k;
	cin >> n >> k;
	Loop (i,0,n)
		cin >> a[i] >> b[i];
	Loop (i,0,k)
		cin >> q[i];
	ll ans = 0;
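	// Process the cards in blocks of S0; each block coordinate-compresses
	// its own values (plus a 0 sentinel) so the kernel can work on shorts.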
	for (int l0 = 0; l0 < n; l0 += S0) {
		int r0 = min(n, l0+S0);
		vector<int> vec = {0};
		Loop (i,l0,r0) {
			vec.push_back(a[i]);
			vec.push_back(b[i]);
		}
		sort(vec.begin(), vec.end());
		vec.resize(unique(vec.begin(), vec.end()) - vec.begin());
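		// sa/sb hold compressed indices; sb is stored as the XOR mask a^b,
		// so flipping card i is simply sa[i] ^= sb[i].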
		Loop (i,l0,r0) {
			sa[i] = lower_bound(vec.begin(), vec.end(), a[i]) - vec.begin();
			sb[i] = lower_bound(vec.begin(), vec.end(), b[i]) - vec.begin();
			sb[i] ^= sa[i];
		}
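		// Remap queries onto this block's scale: sq[i] is the largest index
		// with vec[sq[i]] <= q[i].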
		Loop (i,0,k)
			sq[i] = upper_bound(vec.begin(), vec.end(), q[i]) - vec.begin() - 1;
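		// Tile the block so each S1-chunk of sa/sb stays hot in cache while
		// all k queries pass over it.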
		for (int l1 = l0; l1 < r0; l1 += S1) {
			int r1 = min(r0, l1+S1);
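			// Three thresholds per call; the zero-filled tail of sq[] past k
			// is a no-op since every compressed card value is >= 1.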
			for (int i = 0; i < k; i += 3)
				up(sq[i+0], sq[i+1], sq[i+2], l1, r1);
		}
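		// vec[sa[i]] is the face-up value of card i after all queries.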
		Loop (i,l0,r0)
			ans += vec[sa[i]];
	}
	cout << ans << '\n';
}
```
| # | Verdict | Execution time | Memory | Grader output |
|---|---|---|---|---|
| 1 | Incorrect | 1 ms | 340 KB | Output isn't correct |
| 2 | Halted | 0 ms | 0 KB | - |
| # | Verdict | Execution time | Memory | Grader output |
|---|---|---|---|---|
| 1 | Incorrect | 1 ms | 340 KB | Output isn't correct |
| 2 | Halted | 0 ms | 0 KB | - |
| # | Verdict | Execution time | Memory | Grader output |
|---|---|---|---|---|
| 1 | Incorrect | 1 ms | 340 KB | Output isn't correct |
| 2 | Halted | 0 ms | 0 KB | - |