        n, c, h, w = img.size()
        count += n
        print(count)
        ff = torch.FloatTensor(n, 512).zero_()
        if opt.PCB:
            ff = torch.FloatTensor(n, 2048, 6).zero_()  # we have six parts
        for i in range(2):
            if i == 1:
                img = fliplr(img)  # second pass uses the horizontally flipped image
            input_img = Variable(img.cuda())
            # if opt.fp16:
            #     input_img = input_img.half()
            outputs = model(input_img)
            f = outputs.data.cpu().float()
            ff = ff + f  # accumulate original and flipped features on the CPU
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
            # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff), 0)
    return features

def get_id(img_path):
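The sqrt(6) factor in the PCB branch can be checked in isolation: dividing each 2048-dim part by its own norm times sqrt(6) leaves every part with norm 1/sqrt(6), so the flattened 2048*6 feature has unit L2 norm and dot products between such features are valid cosine scores. A minimal, self-contained sketch with random stand-in features (the sizes below are just for illustration):

import torch

n, d, parts = 4, 2048, 6          # batch size, per-part dimension, number of PCB parts
ff = torch.randn(n, d, parts)     # stand-in for the accumulated part features

# Per-part L2 norm over the 2048-dim axis, scaled by sqrt(parts).
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * (parts ** 0.5)
ff = ff.div(fnorm.expand_as(ff)).view(n, -1)

print(torch.norm(ff, p=2, dim=1))  # each row is ~1.0, i.e. a unit-length flattened feature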
After Change
        n, c, h, w = img.size()
        count += n
        print(count)
        ff = torch.FloatTensor(n, 512).zero_().cuda()
        if opt.PCB:
            ff = torch.FloatTensor(n, 2048, 6).zero_().cuda()  # we have six parts
        for i in range(2):
            if i == 1:
                img = fliplr(img)  # second pass uses the horizontally flipped image
            input_img = Variable(img.cuda())
            for scale in ms:
                if scale != 1:
                    # bicubic is only available in PyTorch >= 1.1
                    input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode="bicubic", align_corners=False)
                outputs = model(input_img)
                ff += outputs  # accumulate features over flips and scales on the GPU
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
            # 2. To keep the cosine score==1, sqrt(6) is added to norm the whole feature (2048*6).
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features, ff.data.cpu()), 0)  # move the normalized feature back to CPU memory
    return features

def get_id(img_path):
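Both versions call a fliplr helper, and the changed version also iterates over a list of scale factors ms; neither is defined in the snippet. A minimal sketch of what they could look like, where the hard-coded scale string is an assumption for illustration (the actual script presumably parses it from a command-line option):

import torch

def fliplr(img):
    # Flip a batch of images laid out as (N, C, H, W) along the width axis.
    return torch.flip(img, dims=[3])

# Assumed multi-scale setup: a comma-separated string of scale factors,
# e.g. "1,1.1,1.2", turned into the list the feature loop iterates over.
ms = [float(s) for s in "1,1.1,1.2".split(',')]

With scale factors other than 1, the interpolate call resizes the batch before it is passed through the model, and the resulting features are summed into ff alongside the original and flipped passes before normalization.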