9520f251808add32cd7c6b36f65fc6e031eaa3ff,nni/algorithms/compression/pytorch/quantization/quantizers.py,,update_quantization_param,#,68

Before Change


    // extend the [min, max] interval to ensure that it contains 0.
    // Otherwise, we would not meet the requirement that 0 be an exactly
    // representable value.
    if rmin.is_cuda:
        rmin = torch.min(rmin, torch.Tensor([0]).cuda())
        rmax = torch.max(rmax, torch.Tensor([0]).cuda())
        qmin = torch.Tensor([0]).cuda()
        qmax = torch.Tensor([(1 << bits) - 1]).cuda()
    else:
        rmin = torch.min(rmin, torch.Tensor([0]))
        rmax = torch.max(rmax, torch.Tensor([0]))
        qmin = torch.Tensor([0])
        qmax = torch.Tensor([(1 << bits) - 1])

    // First determine the scale.
    scale = (rmax - rmin) / (qmax - qmin)

    // Zero-point computation.
    initial_zero_point = qmin - rmin / scale

    // Now we need to nudge the zero point to be an integer
    nudged_zero_point = 0
    if initial_zero_point < qmin:
        nudged_zero_point = qmin
    elif initial_zero_point > qmax:
        nudged_zero_point = qmax
    else:
        nudged_zero_point = torch.round(initial_zero_point)

    return scale, nudged_zero_point

After Change


    // extend the [min, max] interval to ensure that it contains 0.
    // Otherwise, we would not meet the requirement that 0 be an exactly
    // representable value.
    rmin = torch.min(rmin, torch.Tensor([0]).to(rmin.device))
    rmax = torch.max(rmax, torch.Tensor([0]).to(rmin.device))
    qmin = torch.Tensor([0]).to(rmin.device)
    qmax = torch.Tensor([(1 << bits) - 1]).to(rmin.device)

    // First determine the scale.
    scale = (rmax - rmin) / (qmax - qmin)

    // Zero-point computation.
    initial_zero_point = qmin - rmin / scale

    // Now we need to nudge the zero point to be an integer
    if initial_zero_point < qmin:
        nudged_zero_point = qmin
    elif initial_zero_point > qmax:
        nudged_zero_point = qmax
    else:
        nudged_zero_point = torch.round(initial_zero_point)

    return scale, nudged_zero_point
In pattern: SUPERPATTERN

Frequency: 3

Non-data size: 9

Instances


Project Name: microsoft/nni
Commit Name: 9520f251808add32cd7c6b36f65fc6e031eaa3ff
Time: 2020-12-22
Author: 39682259+eedalong@users.noreply.github.com
File Name: nni/algorithms/compression/pytorch/quantization/quantizers.py
Class Name:
Method Name: update_quantization_param