HDU 1003 - Max Sum

Problem Description
Given a sequence a[1],a[2],a[3]…a[n], your job is to calculate the max sum of a sub-sequence. For example, given (6,-1,5,4,-7), the max sum in this sequence is 6 + (-1) + 5 + 4 = 14.

Input
The first line of the input contains an integer T(1<=T<=20) which means the number of test cases. Then T lines follow, each line starts with a number N(1<=N<=100000), then N integers followed(all the integers are between -1000 and 1000).

Output
For each test case, you should output two lines. The first line is “Case #:”, # means the number of the test case. The second line contains three integers, the Max Sum in the sequence, the start position of the sub-sequence, the end position of the sub-sequence. If there are more than one result, output the first one. Output a blank line between two cases.

Sample Input

2
5 6 -1 5 4 -7
7 0 6 -1 1 -6 7 -5

Sample Output

Case 1:
14 1 4

Case 2:
7 1 6
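
The solution is a single left-to-right scan (Kadane's algorithm): keep the sum of the best sub-sequence ending at the current position, and restart it whenever that running sum drops below zero. As a minimal sketch of just this recurrence, without the position bookkeeping that the full solution below adds (the function name `maxSubSum` is only for illustration):

```cpp
#include <algorithm>
#include <vector>

// Best sub-sequence sum of a non-empty array: at each element choose
// between "extend the previous sub-sequence" and "start a new one here".
int maxSubSum(const std::vector<int>& a)
{
    int best = a[0]; // best sum seen so far
    int cur = a[0];  // best sum of a sub-sequence ending at the current element
    for (std::size_t p = 1; p < a.size(); ++p)
    {
        cur = std::max(a[p], cur + a[p]);
        best = std::max(best, cur);
    }
    return best;
}
```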

#include <iostream>
#include <climits>
using namespace std;

int arr[100001]; // N <= 100000, 1-indexed

int main()
{
    int T = 0;
    cin >> T; // number of test cases
    for (int k = 1; k <= T; ++k)
    {
        int n = 0;
        cin >> n; // length of this sequence
        int i = 0, j = 0;  // start / end position of the best sub-sequence
        int res = INT_MIN; // best sum found so far
        int sum = 0;       // sum of the current candidate sub-sequence
        int f = 1;         // start position of the current candidate
        for (int p = 1; p <= n; ++p)
        {
            cin >> arr[p];
            if (sum >= 0)
            {
                // A non-negative prefix can only help: extend it.
                sum += arr[p];
            }
            else
            {
                // A negative prefix is dead weight: restart at p.
                sum = arr[p];
                f = p;
            }
            if (sum > res) // strict '>' keeps the first (leftmost) answer
            {
                res = sum;
                i = f;
                j = p;
            }
        }
        cout << "Case " << k << ":" << endl;
        cout << res << " " << i << " " << j << endl;
        if (k != T)
            cout << endl; // blank line between cases, none after the last
    }
    return 0;
}
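
To convince yourself that the greedy restart rule and the strict `>` comparison really report the first of several optimal sub-sequences, a quadratic brute force is an easy cross-check on small inputs. The `bruteForce` helper below is illustration only, not part of the submitted solution; on the two samples it returns 14 1 4 and 7 1 6, matching the expected output.

```cpp
#include <vector>
#include <climits>

// O(n^2) cross-check: try every (start, end) pair, keep the first best.
// Positions are 1-based to match the judge's expected output.
struct Answer { int sum, start, end; };

Answer bruteForce(const std::vector<int>& a) // a[1..n], a[0] unused
{
    Answer best{INT_MIN, 0, 0};
    int n = (int)a.size() - 1;
    for (int s = 1; s <= n; ++s)
    {
        int sum = 0;
        for (int e = s; e <= n; ++e)
        {
            sum += a[e];
            if (sum > best.sum) // strict '>' keeps the first answer
                best = {sum, s, e};
        }
    }
    return best;
}
```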
