Problem: Given two sequences a and b of size n, count the pairs (l, r) such that max(a[l..r]) = min(b[l..r]).
Analysis: With l fixed, as r grows, max(a[l..r]) can only increase and min(b[l..r]) can only decrease, so their difference max(a[l..r]) - min(b[l..r]) is non-decreasing in r. Hence for each l we can binary-search for the two boundaries of the interval of r on which max(a[l..r]) = min(b[l..r]); every r in between contributes one valid pair.
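As a sanity check, here is a minimal O(n^2) brute-force sketch (a hypothetical helper named brute, not part of the solution below) that extends r while maintaining the running max/min; it is handy for stress-testing the binary-search version on small inputs.

#include<algorithm>
typedef long long LL;
// Count pairs (l,r) with max(a[l..r]) == min(b[l..r]); arrays are 1-indexed.
LL brute(int n,const int a[],const int b[]){
    LL cnt = 0;
    for(int l = 1;l <= n;l++){
        int mx = a[l], mi = b[l];      // running max of a and min of b on [l, r]
        for(int r = l;r <= n;r++){
            mx = std::max(mx,a[r]);    // extend the window to include r
            mi = std::min(mi,b[r]);
            if(mx == mi) cnt++;        // (l, r) is a valid pair
        }
    }
    return cnt;
}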
[code]:
#include<cstdio>
#include<algorithm>
using namespace std;
typedef long long LL;
const int maxn = 2e5+5;
int n,a[maxn],b[maxn];
int mi[21][maxn],mx[21][maxn]; // sparse tables: mi = range min of b, mx = range max of a
int tlog[maxn],p[21];          // tlog[i] = floor(log2(i)), p[j] = 2^j
void init(){
    int i,j;
    p[0] = 1; tlog[0] = -1;
    for(i = 1;i <= 20;i++) p[i] = 2*p[i-1];
    // tlog[i] steps up by 1 exactly at powers of two
    for(i = 1;i <= n;i++)
        tlog[i] = (i&(i-1))?tlog[i-1]:(tlog[i-1]+1);
    // sparse table over b for range minimum
    for(i = 1;i <= n;i++) mi[0][i] = b[i];
    for(j = 1;p[j] <= n;j++)
        for(i = 1;i+p[j]-1 <= n;i++)
            mi[j][i] = min(mi[j-1][i],mi[j-1][i+p[j-1]]);
    // sparse table over a for range maximum
    for(i = 1;i <= n;i++) mx[0][i] = a[i];
    for(j = 1;p[j] <= n;j++)
        for(i = 1;i+p[j]-1 <= n;i++)
            mx[j][i] = max(mx[j-1][i],mx[j-1][i+p[j-1]]);
}
// O(1) range max of a[l..r]: two overlapping power-of-two blocks
int get_mx(int l,int r){
    int k = tlog[r-l+1];
    return max(mx[k][l],mx[k][r-p[k]+1]);
}
// O(1) range min of b[l..r]
int get_mi(int l,int r){
    int k = tlog[r-l+1];
    return min(mi[k][l],mi[k][r-p[k]+1]);
}
// smallest r >= l with max(a[l..r]) >= min(b[l..r]); n+1 if it never happens
int lower(int l){
    int lb = l-1, rb = n+1, mid;
    while(rb-lb > 1){
        mid = (lb+rb)>>1;
        if(get_mx(l,mid) >= get_mi(l,mid)) rb = mid;
        else lb = mid;
    }
    return rb;
}
// smallest r >= l with max(a[l..r]) > min(b[l..r]); n+1 if it never happens
int upper(int l){
    int lb = l-1, rb = n+1, mid;
    while(rb-lb > 1){
        mid = (lb+rb)>>1;
        if(get_mx(l,mid) > get_mi(l,mid)) rb = mid;
        else lb = mid;
    }
    return rb;
}
int main(){
    int i,l,r;
    scanf("%d",&n);
    for(i = 1;i <= n;i++) scanf("%d",&a[i]);
    for(i = 1;i <= n;i++) scanf("%d",&b[i]);
    init();
    LL ans = 0;
    for(i = 1;i <= n;i++){
        // valid r for this fixed l = i form the half-open interval [lower(i), upper(i))
        l = lower(i), r = upper(i);
        ans += r-l;
    }
    printf("%lld\n",ans); // %lld is the portable long long format (%I64d is MSVC-only)
    return 0;
}
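The overall complexity is O(n log n): the sparse tables cost O(n log n) to build and answer each range max/min query in O(1), and every starting point l does two O(log n) binary searches. As a tiny worked example, for a = {1, 2}, b = {2, 2} and l = 1, lower(1) = 2 (the first r with max >= min) and upper(1) = 3 (max never strictly exceeds min), contributing 3 - 2 = 1 pair; l = 2 contributes 1 the same way, so the answer is 2.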