#include "candies.h"
#include <bits/stdc++.h>
#define Loop(x,l,r) for (ll x = (l); x < (r); ++x)
typedef long long ll;
using namespace std;
#pragma GCC optimize("O3,unroll-loops")
#pragma GCC target("avx2")
const int inf = 1e9+1;   // sentinel exceeding every capacity and every |v|
const int N = 200'010;   // upper bound on the number of boxes
const int S = 2048;      // block size: boxes are processed in cache-friendly chunks
int a[N], c[N];          // a[i] = current candies in box i, c[i] = its capacity
void add(int l, int r, int x);
void sub(int l, int r, int x);
void addsub(int l, int r, int x, int y);
void subadd(int l, int r, int x, int y);
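// The top-level asm blob below supplies the definitions of the four kernels
// declared above (the mangled names _Z3addiii etc. match these prototypes)
// as unrolled AVX2 code over a[] and c[]. The commented-out C++ after the
// blob is the scalar reference that each kernel computes.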
asm(R"dard(
.p2align 4
.globl _Z3addiii
.type _Z3addiii, @function
_Z3addiii:
.LmyFB9795:
.cfi_startproc
vmovd %edx, %xmm1
movslq %esi, %r10
movslq %edi, %rdx
cmpq %r10, %rdx
jge .Lmy55
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %r10, %r8
vmovdqa %xmm1, %xmm0
subq %rdx, %r8
leaq -1(%r8), %rax
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r12
pushq %rbx
.cfi_offset 12, -24
.cfi_offset 3, -32
movq %rdx, %rbx
cmpq $6, %rax
jbe .Lmy8
movq %r8, %r12
leaq a(%rip), %r9
xorl %eax, %eax
shrq $3, %r12
leaq 0(,%rdx,4), %rdi
vpbroadcastd %xmm1, %ymm2
salq $5, %r12
leaq c(%rip), %r11
leaq (%r9,%rdi), %rcx
leaq -32(%r12), %rsi
addq %r11, %rdi
shrq $5, %rsi
addq $1, %rsi
andl $7, %esi
je .Lmy4
cmpq $1, %rsi
je .Lmy35
cmpq $2, %rsi
je .Lmy36
cmpq $3, %rsi
je .Lmy37
cmpq $4, %rsi
je .Lmy38
cmpq $5, %rsi
je .Lmy39
cmpq $6, %rsi
jne .Lmy59
.Lmy40:
vpaddd (%rcx,%rax), %ymm2, %ymm5
vpminsd (%rdi,%rax), %ymm5, %ymm6
vmovdqu %ymm6, (%rcx,%rax)
addq $32, %rax
.Lmy39:
vpaddd (%rcx,%rax), %ymm2, %ymm7
vpminsd (%rdi,%rax), %ymm7, %ymm8
vmovdqu %ymm8, (%rcx,%rax)
addq $32, %rax
.Lmy38:
vpaddd (%rcx,%rax), %ymm2, %ymm9
vpminsd (%rdi,%rax), %ymm9, %ymm10
vmovdqu %ymm10, (%rcx,%rax)
addq $32, %rax
.Lmy37:
vpaddd (%rcx,%rax), %ymm2, %ymm11
vpminsd (%rdi,%rax), %ymm11, %ymm12
vmovdqu %ymm12, (%rcx,%rax)
addq $32, %rax
.Lmy36:
vpaddd (%rcx,%rax), %ymm2, %ymm13
vpminsd (%rdi,%rax), %ymm13, %ymm14
vmovdqu %ymm14, (%rcx,%rax)
addq $32, %rax
.Lmy35:
vpaddd (%rcx,%rax), %ymm2, %ymm15
vpminsd (%rdi,%rax), %ymm15, %ymm3
vmovdqu %ymm3, (%rcx,%rax)
addq $32, %rax
cmpq %rax, %r12
je .Lmy51
.Lmy4:
vpaddd (%rcx,%rax), %ymm2, %ymm4
vpminsd (%rdi,%rax), %ymm4, %ymm5
vpaddd 32(%rcx,%rax), %ymm2, %ymm6
vpaddd 64(%rcx,%rax), %ymm2, %ymm8
vmovdqu %ymm5, (%rcx,%rax)
vpminsd 32(%rdi,%rax), %ymm6, %ymm7
vpaddd 96(%rcx,%rax), %ymm2, %ymm10
vpaddd 128(%rcx,%rax), %ymm2, %ymm12
vpaddd 160(%rcx,%rax), %ymm2, %ymm14
vpaddd 192(%rcx,%rax), %ymm2, %ymm3
vmovdqu %ymm7, 32(%rcx,%rax)
vpminsd 64(%rdi,%rax), %ymm8, %ymm9
vpaddd 224(%rcx,%rax), %ymm2, %ymm5
vmovdqu %ymm9, 64(%rcx,%rax)
vpminsd 96(%rdi,%rax), %ymm10, %ymm11
vmovdqu %ymm11, 96(%rcx,%rax)
vpminsd 128(%rdi,%rax), %ymm12, %ymm13
vmovdqu %ymm13, 128(%rcx,%rax)
vpminsd 160(%rdi,%rax), %ymm14, %ymm15
vmovdqu %ymm15, 160(%rcx,%rax)
vpminsd 192(%rdi,%rax), %ymm3, %ymm4
vmovdqu %ymm4, 192(%rcx,%rax)
vpminsd 224(%rdi,%rax), %ymm5, %ymm6
vmovdqu %ymm6, 224(%rcx,%rax)
addq $256, %rax
cmpq %rax, %r12
jne .Lmy4
.Lmy51:
movq %r8, %rdi
andq $-8, %rdi
addq %rdi, %rdx
testb $7, %r8b
je .Lmy60
vzeroupper
.Lmy3:
subq %rdi, %r8
leaq -1(%r8), %rcx
cmpq $2, %rcx
jbe .Lmy6
addq %rbx, %rdi
movq %r8, %r12
vpshufd $0, %xmm0, %xmm0
leaq (%r9,%rdi,4), %rbx
andq $-4, %r12
vpaddd (%rbx), %xmm0, %xmm2
vpminsd (%r11,%rdi,4), %xmm2, %xmm7
addq %r12, %rdx
andl $3, %r8d
vmovdqu %xmm7, (%rbx)
je .Lmy53
.Lmy6:
vmovd (%r9,%rdx,4), %xmm8
vmovd (%r11,%rdx,4), %xmm10
leaq 1(%rdx), %r8
vpaddd %xmm8, %xmm1, %xmm9
vpminsd %xmm10, %xmm9, %xmm11
vmovd %xmm11, (%r9,%rdx,4)
cmpq %r10, %r8
jge .Lmy53
vmovd (%r9,%r8,4), %xmm12
vmovd (%r11,%r8,4), %xmm14
addq $2, %rdx
vpaddd %xmm12, %xmm1, %xmm13
vpminsd %xmm14, %xmm13, %xmm15
vmovd %xmm15, (%r9,%r8,4)
cmpq %rdx, %r10
jle .Lmy53
vmovd (%r9,%rdx,4), %xmm3
vpaddd %xmm3, %xmm1, %xmm4
vmovd (%r11,%rdx,4), %xmm1
vpminsd %xmm1, %xmm4, %xmm5
vmovd %xmm5, (%r9,%rdx,4)
.Lmy53:
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.p2align 4,,10
.p2align 3
.Lmy59:
.cfi_restore_state
vpaddd (%rcx), %ymm2, %ymm3
vpminsd (%rdi), %ymm3, %ymm4
movl $32, %eax
vmovdqu %ymm4, (%rcx)
jmp .Lmy40
.p2align 4,,10
.p2align 3
.Lmy55:
.cfi_def_cfa 7, 8
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.p2align 4,,10
.p2align 3
.Lmy60:
.cfi_def_cfa 6, 16
.cfi_offset 3, -32
.cfi_offset 6, -16
.cfi_offset 12, -24
vzeroupper
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.Lmy8:
.cfi_restore_state
xorl %edi, %edi
leaq a(%rip), %r9
leaq c(%rip), %r11
jmp .Lmy3
.cfi_endproc
.LmyFE9795:
.size _Z3addiii, .-_Z3addiii
.p2align 4
.globl _Z3subiii
.type _Z3subiii, @function
_Z3subiii:
.LmyFB9796:
.cfi_startproc
vmovd %edx, %xmm1
movslq %esi, %r8
movslq %edi, %rdx
vmovdqa %xmm1, %xmm3
cmpq %r8, %rdx
jge .Lmy113
movq %r8, %rsi
movq %rdx, %r9
subq %rdx, %rsi
leaq -1(%rsi), %rax
cmpq $6, %rax
jbe .Lmy68
movq %rsi, %rcx
leaq a(%rip), %rdi
vpbroadcastd %xmm1, %ymm2
shrq $3, %rcx
leaq (%rdi,%rdx,4), %r11
vpxor %xmm0, %xmm0, %xmm0
salq $5, %rcx
leaq (%rcx,%r11), %r10
subq $32, %rcx
shrq $5, %rcx
addq $1, %rcx
andl $7, %ecx
je .Lmy64
cmpq $1, %rcx
je .Lmy95
cmpq $2, %rcx
je .Lmy96
cmpq $3, %rcx
je .Lmy97
cmpq $4, %rcx
je .Lmy98
cmpq $5, %rcx
je .Lmy99
cmpq $6, %rcx
jne .Lmy114
.Lmy100:
vmovdqu (%r11), %ymm6
addq $32, %r11
vpsubd %ymm2, %ymm6, %ymm8
vpmaxsd %ymm0, %ymm8, %ymm9
vmovdqu %ymm9, -32(%r11)
.Lmy99:
vmovdqu (%r11), %ymm10
addq $32, %r11
vpsubd %ymm2, %ymm10, %ymm11
vpmaxsd %ymm0, %ymm11, %ymm12
vmovdqu %ymm12, -32(%r11)
.Lmy98:
vmovdqu (%r11), %ymm13
addq $32, %r11
vpsubd %ymm2, %ymm13, %ymm14
vpmaxsd %ymm0, %ymm14, %ymm15
vmovdqu %ymm15, -32(%r11)
.Lmy97:
vmovdqu (%r11), %ymm7
addq $32, %r11
vpsubd %ymm2, %ymm7, %ymm4
vpmaxsd %ymm0, %ymm4, %ymm5
vmovdqu %ymm5, -32(%r11)
.Lmy96:
vmovdqu (%r11), %ymm6
addq $32, %r11
vpsubd %ymm2, %ymm6, %ymm8
vpmaxsd %ymm0, %ymm8, %ymm9
vmovdqu %ymm9, -32(%r11)
.Lmy95:
vmovdqu (%r11), %ymm10
addq $32, %r11
vpsubd %ymm2, %ymm10, %ymm11
vpmaxsd %ymm0, %ymm11, %ymm12
vmovdqu %ymm12, -32(%r11)
cmpq %r11, %r10
je .Lmy111
.Lmy64:
vmovdqu (%r11), %ymm13
vmovdqu 32(%r11), %ymm7
addq $256, %r11
vmovdqu -192(%r11), %ymm6
vmovdqu -160(%r11), %ymm10
vpsubd %ymm2, %ymm13, %ymm14
vpsubd %ymm2, %ymm7, %ymm4
vmovdqu -128(%r11), %ymm13
vmovdqu -96(%r11), %ymm7
vpsubd %ymm2, %ymm6, %ymm8
vpsubd %ymm2, %ymm10, %ymm11
vmovdqu -64(%r11), %ymm6
vmovdqu -32(%r11), %ymm10
vpmaxsd %ymm0, %ymm14, %ymm15
vpmaxsd %ymm0, %ymm4, %ymm5
vpmaxsd %ymm0, %ymm8, %ymm9
vpmaxsd %ymm0, %ymm11, %ymm12
vmovdqu %ymm15, -256(%r11)
vpsubd %ymm2, %ymm13, %ymm14
vpsubd %ymm2, %ymm7, %ymm4
vmovdqu %ymm5, -224(%r11)
vpsubd %ymm2, %ymm6, %ymm8
vpsubd %ymm2, %ymm10, %ymm11
vmovdqu %ymm9, -192(%r11)
vmovdqu %ymm12, -160(%r11)
vpmaxsd %ymm0, %ymm14, %ymm15
vpmaxsd %ymm0, %ymm4, %ymm5
vpmaxsd %ymm0, %ymm8, %ymm9
vpmaxsd %ymm0, %ymm11, %ymm12
vmovdqu %ymm15, -128(%r11)
vmovdqu %ymm5, -96(%r11)
vmovdqu %ymm9, -64(%r11)
vmovdqu %ymm12, -32(%r11)
cmpq %r11, %r10
jne .Lmy64
.Lmy111:
movq %rsi, %rax
andq $-8, %rax
addq %rax, %rdx
testb $7, %sil
je .Lmy115
vzeroupper
.Lmy63:
subq %rax, %rsi
leaq -1(%rsi), %r11
cmpq $2, %r11
jbe .Lmy66
addq %r9, %rax
vpshufd $0, %xmm3, %xmm3
vpxor %xmm13, %xmm13, %xmm13
movq %rsi, %rcx
leaq (%rdi,%rax,4), %r9
andq $-4, %rcx
vmovdqu (%r9), %xmm2
addq %rcx, %rdx
andl $3, %esi
vpsubd %xmm3, %xmm2, %xmm0
vpmaxsd %xmm13, %xmm0, %xmm14
vmovdqu %xmm14, (%r9)
je .Lmy113
.Lmy66:
vmovd (%rdi,%rdx,4), %xmm15
vpxor %xmm4, %xmm4, %xmm4
leaq 1(%rdx), %rsi
vpsubd %xmm1, %xmm15, %xmm7
vpmaxsd %xmm4, %xmm7, %xmm5
vmovd %xmm5, (%rdi,%rdx,4)
cmpq %rsi, %r8
jle .Lmy113
vpinsrd $0, (%rdi,%rsi,4), %xmm4, %xmm6
addq $2, %rdx
vpsubd %xmm1, %xmm6, %xmm8
vpmaxsd %xmm4, %xmm8, %xmm9
vmovd %xmm9, (%rdi,%rsi,4)
cmpq %rdx, %r8
jle .Lmy113
vpinsrd $0, (%rdi,%rdx,4), %xmm4, %xmm10
vpsubd %xmm1, %xmm10, %xmm11
vpmaxsd %xmm4, %xmm11, %xmm1
vmovd %xmm1, (%rdi,%rdx,4)
.Lmy113:
ret
.p2align 4,,10
.p2align 3
.Lmy114:
vmovdqu (%r11), %ymm7
addq $32, %r11
vpsubd %ymm2, %ymm7, %ymm4
vpmaxsd %ymm0, %ymm4, %ymm5
vmovdqu %ymm5, -32(%r11)
jmp .Lmy100
.p2align 4,,10
.p2align 3
.Lmy115:
vzeroupper
ret
.Lmy68:
xorl %eax, %eax
leaq a(%rip), %rdi
jmp .Lmy63
.cfi_endproc
.LmyFE9796:
.size _Z3subiii, .-_Z3subiii
.p2align 4
.globl _Z6addsubiiii
.type _Z6addsubiiii, @function
_Z6addsubiiii:
.LmyFB9797:
.cfi_startproc
vmovd %edx, %xmm1
movslq %esi, %r10
movslq %edi, %rdx
cmpq %r10, %rdx
jge .Lmy170
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %r10, %rdi
vmovd %ecx, %xmm2
vmovdqa %xmm1, %xmm0
subq %rdx, %rdi
vmovdqa %xmm2, %xmm6
leaq -1(%rdi), %rax
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r12
pushq %rbx
.cfi_offset 12, -24
.cfi_offset 3, -32
movq %rdx, %rbx
cmpq $6, %rax
jbe .Lmy123
movq %rdi, %r12
leaq a(%rip), %r9
vpxor %xmm3, %xmm3, %xmm3
xorl %eax, %eax
shrq $3, %r12
vpbroadcastd %xmm1, %ymm5
vpbroadcastd %xmm2, %ymm4
salq $5, %r12
leaq 0(,%rdx,4), %rsi
leaq c(%rip), %r11
leaq -32(%r12), %r8
leaq (%r9,%rsi), %rcx
addq %r11, %rsi
shrq $5, %r8
addq $1, %r8
andl $7, %r8d
je .Lmy119
cmpq $1, %r8
je .Lmy150
cmpq $2, %r8
je .Lmy151
cmpq $3, %r8
je .Lmy152
cmpq $4, %r8
je .Lmy153
cmpq $5, %r8
je .Lmy154
cmpq $6, %r8
jne .Lmy173
.Lmy155:
vpaddd (%rcx,%rax), %ymm5, %ymm11
vpminsd (%rsi,%rax), %ymm11, %ymm12
vpsubd %ymm4, %ymm12, %ymm13
vpmaxsd %ymm3, %ymm13, %ymm14
vmovdqu %ymm14, (%rcx,%rax)
addq $32, %rax
.Lmy154:
vpaddd (%rcx,%rax), %ymm5, %ymm15
vpminsd (%rsi,%rax), %ymm15, %ymm7
vpsubd %ymm4, %ymm7, %ymm8
vpmaxsd %ymm3, %ymm8, %ymm9
vmovdqu %ymm9, (%rcx,%rax)
addq $32, %rax
.Lmy153:
vpaddd (%rcx,%rax), %ymm5, %ymm10
vpminsd (%rsi,%rax), %ymm10, %ymm11
vpsubd %ymm4, %ymm11, %ymm12
vpmaxsd %ymm3, %ymm12, %ymm13
vmovdqu %ymm13, (%rcx,%rax)
addq $32, %rax
.Lmy152:
vpaddd (%rcx,%rax), %ymm5, %ymm14
vpminsd (%rsi,%rax), %ymm14, %ymm15
vpsubd %ymm4, %ymm15, %ymm7
vpmaxsd %ymm3, %ymm7, %ymm8
vmovdqu %ymm8, (%rcx,%rax)
addq $32, %rax
.Lmy151:
vpaddd (%rcx,%rax), %ymm5, %ymm9
vpminsd (%rsi,%rax), %ymm9, %ymm10
vpsubd %ymm4, %ymm10, %ymm11
vpmaxsd %ymm3, %ymm11, %ymm12
vmovdqu %ymm12, (%rcx,%rax)
addq $32, %rax
.Lmy150:
vpaddd (%rcx,%rax), %ymm5, %ymm13
vpminsd (%rsi,%rax), %ymm13, %ymm14
vpsubd %ymm4, %ymm14, %ymm15
vpmaxsd %ymm3, %ymm15, %ymm7
vmovdqu %ymm7, (%rcx,%rax)
addq $32, %rax
cmpq %rax, %r12
je .Lmy166
.Lmy119:
vpaddd (%rcx,%rax), %ymm5, %ymm8
vpminsd (%rsi,%rax), %ymm8, %ymm9
vpaddd 32(%rcx,%rax), %ymm5, %ymm12
vpaddd 64(%rcx,%rax), %ymm5, %ymm7
vpsubd %ymm4, %ymm9, %ymm10
vpmaxsd %ymm3, %ymm10, %ymm11
vmovdqu %ymm11, (%rcx,%rax)
vpminsd 32(%rsi,%rax), %ymm12, %ymm13
vpaddd 96(%rcx,%rax), %ymm5, %ymm11
vpsubd %ymm4, %ymm13, %ymm14
vpmaxsd %ymm3, %ymm14, %ymm15
vmovdqu %ymm15, 32(%rcx,%rax)
vpminsd 64(%rsi,%rax), %ymm7, %ymm8
vpaddd 128(%rcx,%rax), %ymm5, %ymm15
vpsubd %ymm4, %ymm8, %ymm9
vpmaxsd %ymm3, %ymm9, %ymm10
vmovdqu %ymm10, 64(%rcx,%rax)
vpminsd 96(%rsi,%rax), %ymm11, %ymm12
vpaddd 160(%rcx,%rax), %ymm5, %ymm10
vpsubd %ymm4, %ymm12, %ymm13
vpmaxsd %ymm3, %ymm13, %ymm14
vmovdqu %ymm14, 96(%rcx,%rax)
vpminsd 128(%rsi,%rax), %ymm15, %ymm7
vpaddd 192(%rcx,%rax), %ymm5, %ymm14
vpsubd %ymm4, %ymm7, %ymm8
vpmaxsd %ymm3, %ymm8, %ymm9
vmovdqu %ymm9, 128(%rcx,%rax)
vpminsd 160(%rsi,%rax), %ymm10, %ymm11
vpaddd 224(%rcx,%rax), %ymm5, %ymm9
vpsubd %ymm4, %ymm11, %ymm12
vpmaxsd %ymm3, %ymm12, %ymm13
vmovdqu %ymm13, 160(%rcx,%rax)
vpminsd 192(%rsi,%rax), %ymm14, %ymm15
vpsubd %ymm4, %ymm15, %ymm7
vpmaxsd %ymm3, %ymm7, %ymm8
vmovdqu %ymm8, 192(%rcx,%rax)
vpminsd 224(%rsi,%rax), %ymm9, %ymm10
vpsubd %ymm4, %ymm10, %ymm11
vpmaxsd %ymm3, %ymm11, %ymm12
vmovdqu %ymm12, 224(%rcx,%rax)
addq $256, %rax
cmpq %rax, %r12
jne .Lmy119
.Lmy166:
movq %rdi, %rsi
andq $-8, %rsi
addq %rsi, %rdx
testb $7, %dil
je .Lmy174
vzeroupper
.Lmy118:
subq %rsi, %rdi
leaq -1(%rdi), %rcx
cmpq $2, %rcx
jbe .Lmy121
addq %rbx, %rsi
vpshufd $0, %xmm0, %xmm0
vpshufd $0, %xmm6, %xmm6
movq %rdi, %r12
leaq (%r9,%rsi,4), %rbx
andq $-4, %r12
vpxor %xmm3, %xmm3, %xmm3
vpaddd (%rbx), %xmm0, %xmm5
vpminsd (%r11,%rsi,4), %xmm5, %xmm4
addq %r12, %rdx
andl $3, %edi
vpsubd %xmm6, %xmm4, %xmm13
vpmaxsd %xmm3, %xmm13, %xmm14
vmovdqu %xmm14, (%rbx)
je .Lmy168
.Lmy121:
vmovd (%r9,%rdx,4), %xmm15
vmovd (%r11,%rdx,4), %xmm8
vpxor %xmm11, %xmm11, %xmm11
vpaddd %xmm15, %xmm1, %xmm7
leaq 1(%rdx), %rdi
vpminsd %xmm8, %xmm7, %xmm9
vpsubd %xmm2, %xmm9, %xmm10
vpmaxsd %xmm11, %xmm10, %xmm12
vmovd %xmm12, (%r9,%rdx,4)
cmpq %rdi, %r10
jle .Lmy168
vpinsrd $0, (%r9,%rdi,4), %xmm11, %xmm0
vpinsrd $0, (%r11,%rdi,4), %xmm11, %xmm4
vpxor %xmm3, %xmm3, %xmm3
addq $2, %rdx
vpaddd %xmm0, %xmm1, %xmm5
vpminsd %xmm4, %xmm5, %xmm6
vpsubd %xmm2, %xmm6, %xmm13
vpmaxsd %xmm3, %xmm13, %xmm14
vmovd %xmm14, (%r9,%rdi,4)
cmpq %rdx, %r10
jle .Lmy168
vpinsrd $0, (%r9,%rdx,4), %xmm3, %xmm15
vpaddd %xmm15, %xmm1, %xmm7
vpinsrd $0, (%r11,%rdx,4), %xmm3, %xmm1
vpminsd %xmm1, %xmm7, %xmm8
vpsubd %xmm2, %xmm8, %xmm2
vpmaxsd %xmm3, %xmm2, %xmm9
vmovd %xmm9, (%r9,%rdx,4)
.Lmy168:
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.p2align 4,,10
.p2align 3
.Lmy173:
.cfi_restore_state
vpaddd (%rcx), %ymm5, %ymm7
vpminsd (%rsi), %ymm7, %ymm8
movl $32, %eax
vpsubd %ymm4, %ymm8, %ymm9
vpmaxsd %ymm3, %ymm9, %ymm10
vmovdqu %ymm10, (%rcx)
jmp .Lmy155
.p2align 4,,10
.p2align 3
.Lmy170:
.cfi_def_cfa 7, 8
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.p2align 4,,10
.p2align 3
.Lmy174:
.cfi_def_cfa 6, 16
.cfi_offset 3, -32
.cfi_offset 6, -16
.cfi_offset 12, -24
vzeroupper
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.Lmy123:
.cfi_restore_state
xorl %esi, %esi
leaq a(%rip), %r9
leaq c(%rip), %r11
jmp .Lmy118
.cfi_endproc
.LmyFE9797:
.size _Z6addsubiiii, .-_Z6addsubiiii
.p2align 4
.globl _Z6subaddiiii
.type _Z6subaddiiii, @function
_Z6subaddiiii:
.LmyFB9798:
.cfi_startproc
vmovd %edx, %xmm1
movslq %esi, %r10
movslq %edi, %rdx
cmpq %r10, %rdx
jge .Lmy229
pushq %rbp
.cfi_def_cfa_offset 16
.cfi_offset 6, -16
movq %r10, %rdi
vmovd %ecx, %xmm2
vmovdqa %xmm1, %xmm6
subq %rdx, %rdi
vmovdqa %xmm2, %xmm7
leaq -1(%rdi), %rax
movq %rsp, %rbp
.cfi_def_cfa_register 6
pushq %r12
pushq %rbx
.cfi_offset 12, -24
.cfi_offset 3, -32
movq %rdx, %rbx
cmpq $6, %rax
jbe .Lmy182
movq %rdi, %r12
leaq a(%rip), %r9
vpxor %xmm3, %xmm3, %xmm3
xorl %eax, %eax
shrq $3, %r12
vpbroadcastd %xmm1, %ymm4
vpbroadcastd %xmm2, %ymm0
salq $5, %r12
leaq 0(,%rdx,4), %rsi
leaq c(%rip), %r11
leaq -32(%r12), %r8
leaq (%r9,%rsi), %rcx
addq %r11, %rsi
shrq $5, %r8
addq $1, %r8
andl $7, %r8d
je .Lmy178
cmpq $1, %r8
je .Lmy209
cmpq $2, %r8
je .Lmy210
cmpq $3, %r8
je .Lmy211
cmpq $4, %r8
je .Lmy212
cmpq $5, %r8
je .Lmy213
cmpq $6, %r8
jne .Lmy232
.Lmy214:
vmovdqu (%rcx,%rax), %ymm12
vpsubd %ymm4, %ymm12, %ymm13
vpmaxsd %ymm3, %ymm13, %ymm14
vpaddd %ymm0, %ymm14, %ymm15
vpminsd (%rsi,%rax), %ymm15, %ymm5
vmovdqu %ymm5, (%rcx,%rax)
addq $32, %rax
.Lmy213:
vmovdqu (%rcx,%rax), %ymm8
vpsubd %ymm4, %ymm8, %ymm9
vpmaxsd %ymm3, %ymm9, %ymm10
vpaddd %ymm0, %ymm10, %ymm11
vpminsd (%rsi,%rax), %ymm11, %ymm12
vmovdqu %ymm12, (%rcx,%rax)
addq $32, %rax
.Lmy212:
vmovdqu (%rcx,%rax), %ymm13
vpsubd %ymm4, %ymm13, %ymm14
vpmaxsd %ymm3, %ymm14, %ymm15
vpaddd %ymm0, %ymm15, %ymm5
vpminsd (%rsi,%rax), %ymm5, %ymm8
vmovdqu %ymm8, (%rcx,%rax)
addq $32, %rax
.Lmy211:
vmovdqu (%rcx,%rax), %ymm9
vpsubd %ymm4, %ymm9, %ymm10
vpmaxsd %ymm3, %ymm10, %ymm11
vpaddd %ymm0, %ymm11, %ymm12
vpminsd (%rsi,%rax), %ymm12, %ymm13
vmovdqu %ymm13, (%rcx,%rax)
addq $32, %rax
.Lmy210:
vmovdqu (%rcx,%rax), %ymm14
vpsubd %ymm4, %ymm14, %ymm15
vpmaxsd %ymm3, %ymm15, %ymm5
vpaddd %ymm0, %ymm5, %ymm8
vpminsd (%rsi,%rax), %ymm8, %ymm9
vmovdqu %ymm9, (%rcx,%rax)
addq $32, %rax
.Lmy209:
vmovdqu (%rcx,%rax), %ymm10
vpsubd %ymm4, %ymm10, %ymm11
vpmaxsd %ymm3, %ymm11, %ymm12
vpaddd %ymm0, %ymm12, %ymm13
vpminsd (%rsi,%rax), %ymm13, %ymm14
vmovdqu %ymm14, (%rcx,%rax)
addq $32, %rax
cmpq %rax, %r12
je .Lmy225
.Lmy178:
vmovdqu (%rcx,%rax), %ymm15
vmovdqu 32(%rcx,%rax), %ymm11
vpsubd %ymm4, %ymm15, %ymm5
vpsubd %ymm4, %ymm11, %ymm12
vpmaxsd %ymm3, %ymm5, %ymm8
vmovdqu 64(%rcx,%rax), %ymm5
vpmaxsd %ymm3, %ymm12, %ymm13
vmovdqu 96(%rcx,%rax), %ymm12
vpaddd %ymm0, %ymm8, %ymm9
vpminsd (%rsi,%rax), %ymm9, %ymm10
vpaddd %ymm0, %ymm13, %ymm14
vpsubd %ymm4, %ymm5, %ymm8
vmovdqu %ymm10, (%rcx,%rax)
vpminsd 32(%rsi,%rax), %ymm14, %ymm15
vpmaxsd %ymm3, %ymm8, %ymm9
vmovdqu 128(%rcx,%rax), %ymm8
vpaddd %ymm0, %ymm9, %ymm10
vpsubd %ymm4, %ymm12, %ymm13
vmovdqu %ymm15, 32(%rcx,%rax)
vpmaxsd %ymm3, %ymm13, %ymm14
vpsubd %ymm4, %ymm8, %ymm9
vmovdqu 160(%rcx,%rax), %ymm13
vpminsd 64(%rsi,%rax), %ymm10, %ymm11
vpaddd %ymm0, %ymm14, %ymm15
vpmaxsd %ymm3, %ymm9, %ymm10
vpsubd %ymm4, %ymm13, %ymm14
vmovdqu 192(%rcx,%rax), %ymm9
vmovdqu %ymm11, 64(%rcx,%rax)
vpminsd 96(%rsi,%rax), %ymm15, %ymm5
vpaddd %ymm0, %ymm10, %ymm11
vpmaxsd %ymm3, %ymm14, %ymm15
vpsubd %ymm4, %ymm9, %ymm10
vmovdqu 224(%rcx,%rax), %ymm14
vmovdqu %ymm5, 96(%rcx,%rax)
vpaddd %ymm0, %ymm15, %ymm5
vpminsd 128(%rsi,%rax), %ymm11, %ymm12
vpmaxsd %ymm3, %ymm10, %ymm11
vpsubd %ymm4, %ymm14, %ymm15
vmovdqu %ymm12, 128(%rcx,%rax)
vpaddd %ymm0, %ymm11, %ymm12
vpminsd 160(%rsi,%rax), %ymm5, %ymm8
vpmaxsd %ymm3, %ymm15, %ymm5
vmovdqu %ymm8, 160(%rcx,%rax)
vpaddd %ymm0, %ymm5, %ymm8
vpminsd 192(%rsi,%rax), %ymm12, %ymm13
vmovdqu %ymm13, 192(%rcx,%rax)
vpminsd 224(%rsi,%rax), %ymm8, %ymm9
vmovdqu %ymm9, 224(%rcx,%rax)
addq $256, %rax
cmpq %rax, %r12
jne .Lmy178
.Lmy225:
movq %rdi, %rsi
andq $-8, %rsi
addq %rsi, %rdx
testb $7, %dil
je .Lmy233
vzeroupper
.Lmy177:
subq %rsi, %rdi
leaq -1(%rdi), %rcx
cmpq $2, %rcx
jbe .Lmy180
addq %rbx, %rsi
vpshufd $0, %xmm6, %xmm6
vpxor %xmm3, %xmm3, %xmm3
movq %rdi, %r12
leaq (%r9,%rsi,4), %rbx
vpshufd $0, %xmm7, %xmm7
andq $-4, %r12
vmovdqu (%rbx), %xmm4
addq %r12, %rdx
andl $3, %edi
vpsubd %xmm6, %xmm4, %xmm0
vpmaxsd %xmm3, %xmm0, %xmm10
vpaddd %xmm7, %xmm10, %xmm11
vpminsd (%r11,%rsi,4), %xmm11, %xmm12
vmovdqu %xmm12, (%rbx)
je .Lmy227
.Lmy180:
vmovd (%r9,%rdx,4), %xmm13
vpxor %xmm15, %xmm15, %xmm15
leaq 1(%rdx), %rdi
vpsubd %xmm1, %xmm13, %xmm14
vpinsrd $0, (%r11,%rdx,4), %xmm15, %xmm9
vpmaxsd %xmm15, %xmm14, %xmm5
vpaddd %xmm2, %xmm5, %xmm8
vpminsd %xmm9, %xmm8, %xmm6
vmovd %xmm6, (%r9,%rdx,4)
cmpq %rdi, %r10
jle .Lmy227
vmovd (%r9,%rdi,4), %xmm4
vpxor %xmm3, %xmm3, %xmm3
addq $2, %rdx
vpsubd %xmm1, %xmm4, %xmm0
vpinsrd $0, (%r11,%rdi,4), %xmm3, %xmm11
vpmaxsd %xmm3, %xmm0, %xmm10
vpaddd %xmm2, %xmm10, %xmm7
vpminsd %xmm11, %xmm7, %xmm12
vmovd %xmm12, (%r9,%rdi,4)
cmpq %rdx, %r10
jle .Lmy227
vmovd (%r9,%rdx,4), %xmm13
vpsubd %xmm1, %xmm13, %xmm14
vpxor %xmm1, %xmm1, %xmm1
vpinsrd $0, (%r11,%rdx,4), %xmm1, %xmm5
vpmaxsd %xmm1, %xmm14, %xmm15
vpaddd %xmm2, %xmm15, %xmm2
vpminsd %xmm5, %xmm2, %xmm8
vmovd %xmm8, (%r9,%rdx,4)
.Lmy227:
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.p2align 4,,10
.p2align 3
.Lmy232:
.cfi_restore_state
vmovdqu (%rcx), %ymm5
movl $32, %eax
vpsubd %ymm4, %ymm5, %ymm8
vpmaxsd %ymm3, %ymm8, %ymm9
vpaddd %ymm0, %ymm9, %ymm10
vpminsd (%rsi), %ymm10, %ymm11
vmovdqu %ymm11, (%rcx)
jmp .Lmy214
.p2align 4,,10
.p2align 3
.Lmy229:
.cfi_def_cfa 7, 8
.cfi_restore 3
.cfi_restore 6
.cfi_restore 12
ret
.p2align 4,,10
.p2align 3
.Lmy233:
.cfi_def_cfa 6, 16
.cfi_offset 3, -32
.cfi_offset 6, -16
.cfi_offset 12, -24
vzeroupper
popq %rbx
popq %r12
popq %rbp
.cfi_remember_state
.cfi_def_cfa 7, 8
ret
.Lmy182:
.cfi_restore_state
xorl %esi, %esi
leaq a(%rip), %r9
leaq c(%rip), %r11
jmp .Lmy177
.cfi_endproc
.LmyFE9798:
.size _Z6subaddiiii, .-_Z6subaddiiii
)dard");
//void add(int l, int r, int x)
//{
// Loop (i,l,r)
// a[i] = (a[i] + x > c[i]? c[i]: a[i] + x);
//}
//void sub(int l, int r, int x)
//{
// Loop (i,l,r)
// a[i] = (a[i] - x < 0? 0: a[i] - x);
//}
//void addsub(int l, int r, int x, int y)
//{
// Loop (i,l,r) {
// a[i] = (a[i] + x > c[i]? c[i]: a[i] + x);
// a[i] = (a[i] - y < 0? 0: a[i] - y);
// }
//}
//void subadd(int l, int r, int x, int y)
//{
// Loop (i,l,r) {
// a[i] = (a[i] - x < 0? 0: a[i] - x);
// a[i] = (a[i] + y > c[i]? c[i]: a[i] + y);
// }
//}
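// For readability, a minimal intrinsics-level sketch of what the add()
// kernel computes, mirroring the scalar reference above. This is
// illustrative only and is not the code the submission runs (that is the
// asm blob); it assumes AVX2, which the target pragma already requests.
#include <immintrin.h>
void add_sketch(int l, int r, int x)
{
	__m256i vx = _mm256_set1_epi32(x);       // broadcast the added value
	int i = l;
	for (; i + 8 <= r; i += 8) {             // 8 boxes per vector iteration
		__m256i va = _mm256_loadu_si256((const __m256i *)(a + i));
		__m256i vc = _mm256_loadu_si256((const __m256i *)(c + i));
		__m256i s  = _mm256_min_epi32(_mm256_add_epi32(va, vx), vc);
		_mm256_storeu_si256((__m256i *)(a + i), s);
	}
	for (; i < r; ++i)                       // scalar tail, (r - l) mod 8 boxes
		a[i] = min(a[i] + x, c[i]);
}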
// Apply one clipped query to [l, r): a positive value is a saturating add,
// a non-positive one a floored subtract.
void up(int l, int r, int x)
{
	if (x > 0)
		add(l, r, x);
	else
		sub(l, r, -x);
}
// Apply two consecutive queries to a fully covered block. Two same-sign
// updates collapse into one: saturation is monotone, and clamping the
// combined value at inf is harmless since inf already saturates (or
// empties) every box. Mixed signs go through the fused kernels. The sum is
// taken in 64 bits first, because |x| + |y| can exceed INT_MAX.
void upup(int l, int r, int x, int y)
{
	ll s = (ll)x + y;
	if (x > 0 && y > 0)
		add(l, r, (int)min<ll>(inf, s));
	else if (x <= 0 && y <= 0)
		sub(l, r, (int)min<ll>(inf, -s));
	else if (x > 0)
		addsub(l, r, x, -y);
	else
		subadd(l, r, -x, y);
}
std::vector<int> distribute_candies(std::vector<int> _c, std::vector<int> ql,
                                    std::vector<int> qr, std::vector<int> qv) {
	int n = _c.size();
	int q = ql.size();
	for (int &x : qr)     // switch to half-open ranges [l, r)
		++x;
	if (q%2) {            // pad with a no-op query so queries pair up
		ql.push_back(0);
		qr.push_back(1);
		qv.push_back(0);
		++q;
	}
	Loop (i,0,n)
		c[i] = _c[i];
	for (int L = 0; L < n; L += S) {       // one cache-resident block at a time
		int R = min<int>(L+S, n);
		for (int i = 0; i < q; i += 2) {   // replay all queries, two per step
			int l0 = ql[i], l1 = ql[i+1];
			int r0 = qr[i], r1 = qr[i+1];
			int v0 = qv[i], v1 = qv[i+1];
			if (l0 <= L && R <= r0 && l1 <= L && R <= r1) {
				upup(L, R, v0, v1);        // both queries cover the block: fuse
			} else {
				up(max(l0, L), min(r0, R), v0);  // otherwise clip each to the block
				up(max(l1, L), min(r1, R), v1);
			}
		}
	}
	return vector<int>(a, a+n);
}
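// A minimal local driver, assuming compilation outside the judge
// (hypothetical: the official grader supplies main and candies.h). It runs
// the worked example from the statement: capacities {10, 15, 13}, then +20
// on boxes 0..2 (saturating all three) and -11 on boxes 0..1, leaving
// {0, 4, 13}.
#ifdef LOCAL_TEST
int main()
{
	// hypothetical local-only check, not part of the graded submission
	vector<int> res = distribute_candies({10, 15, 13}, {0, 0}, {2, 1}, {20, -11});
	for (int v : res)
		printf("%d ", v);   // expected: 0 4 13
	puts("");
}
#endif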
| # | Verdict | Execution time | Memory | Grader output |
|---|---------|----------------|--------|-------------------|
| 1 | Correct | 0 ms | 212 KB | Output is correct |
| 2 | Correct | 1 ms | 212 KB | Output is correct |
| 3 | Correct | 1 ms | 276 KB | Output is correct |
| 4 | Correct | 1 ms | 340 KB | Output is correct |
| 5 | Correct | 1 ms | 340 KB | Output is correct |

| # | Verdict | Execution time | Memory | Grader output |
|---|---------|----------------|--------|-------------------|
| 1 | Correct | 909 ms | 8824 KB | Output is correct |
| 2 | Correct | 876 ms | 8840 KB | Output is correct |
| 3 | Correct | 914 ms | 8904 KB | Output is correct |

| # | Verdict | Execution time | Memory | Grader output |
|---|---------|----------------|--------|-------------------|
| 1 | Correct | 1 ms | 212 KB | Output is correct |
| 2 | Correct | 59 ms | 5048 KB | Output is correct |
| 3 | Correct | 58 ms | 5292 KB | Output is correct |
| 4 | Correct | 824 ms | 8896 KB | Output is correct |
| 5 | Correct | 873 ms | 8928 KB | Output is correct |
| 6 | Correct | 832 ms | 8904 KB | Output is correct |
| 7 | Correct | 793 ms | 8924 KB | Output is correct |

| # | Verdict | Execution time | Memory | Grader output |
|---|---------|----------------|--------|-------------------|
| 1 | Correct | 0 ms | 256 KB | Output is correct |
| 2 | Correct | 1 ms | 212 KB | Output is correct |
| 3 | Correct | 52 ms | 4960 KB | Output is correct |
| 4 | Correct | 53 ms | 4284 KB | Output is correct |
| 5 | Correct | 1360 ms | 8864 KB | Output is correct |
| 6 | Correct | 1332 ms | 8912 KB | Output is correct |
| 7 | Correct | 1297 ms | 8904 KB | Output is correct |
| 8 | Correct | 1297 ms | 8900 KB | Output is correct |
| 9 | Correct | 1314 ms | 8908 KB | Output is correct |

| # | Verdict | Execution time | Memory | Grader output |
|---|---------|----------------|--------|-------------------|
| 1 | Correct | 0 ms | 212 KB | Output is correct |
| 2 | Correct | 1 ms | 212 KB | Output is correct |
| 3 | Correct | 1 ms | 276 KB | Output is correct |
| 4 | Correct | 1 ms | 340 KB | Output is correct |
| 5 | Correct | 1 ms | 340 KB | Output is correct |
| 6 | Correct | 909 ms | 8824 KB | Output is correct |
| 7 | Correct | 876 ms | 8840 KB | Output is correct |
| 8 | Correct | 914 ms | 8904 KB | Output is correct |
| 9 | Correct | 1 ms | 212 KB | Output is correct |
| 10 | Correct | 59 ms | 5048 KB | Output is correct |
| 11 | Correct | 58 ms | 5292 KB | Output is correct |
| 12 | Correct | 824 ms | 8896 KB | Output is correct |
| 13 | Correct | 873 ms | 8928 KB | Output is correct |
| 14 | Correct | 832 ms | 8904 KB | Output is correct |
| 15 | Correct | 793 ms | 8924 KB | Output is correct |
| 16 | Correct | 0 ms | 256 KB | Output is correct |
| 17 | Correct | 1 ms | 212 KB | Output is correct |
| 18 | Correct | 52 ms | 4960 KB | Output is correct |
| 19 | Correct | 53 ms | 4284 KB | Output is correct |
| 20 | Correct | 1360 ms | 8864 KB | Output is correct |
| 21 | Correct | 1332 ms | 8912 KB | Output is correct |
| 22 | Correct | 1297 ms | 8904 KB | Output is correct |
| 23 | Correct | 1297 ms | 8900 KB | Output is correct |
| 24 | Correct | 1314 ms | 8908 KB | Output is correct |
| 25 | Correct | 0 ms | 212 KB | Output is correct |
| 26 | Correct | 46 ms | 4240 KB | Output is correct |
| 27 | Correct | 52 ms | 5044 KB | Output is correct |
| 28 | Correct | 819 ms | 8900 KB | Output is correct |
| 29 | Correct | 819 ms | 8884 KB | Output is correct |
| 30 | Correct | 833 ms | 8988 KB | Output is correct |
| 31 | Correct | 812 ms | 8840 KB | Output is correct |
| 32 | Correct | 842 ms | 8852 KB | Output is correct |