diff --git a/official/cv/Arcface/eval_ijbc.py b/official/cv/Arcface/eval_ijbc.py
index 26f5180b15892eedcfc758fc3780958f5176f10b..a0ef724d4ac08515f2b6319e00f360b563522b64 100644
--- a/official/cv/Arcface/eval_ijbc.py
+++ b/official/cv/Arcface/eval_ijbc.py
@@ -156,8 +156,8 @@ def divideIntoNstrand(listTemp, n):
 
 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    media = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int_)
+    media = ijb_meta[:, 2].astype(np.int_)
     return templates, media
 
 
@@ -166,9 +166,9 @@ def read_template_media_list(path):
 
 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int_)
+    t2 = pairs[:, 1].astype(np.int_)
+    label = pairs[:, 2].astype(np.int_)
     return t1, t2, label
 
diff --git a/official/cv/Arcface/eval_ijbc_onnx.py b/official/cv/Arcface/eval_ijbc_onnx.py
index 317c51f4b5680755e3fb5e146dd1ec07e45c3901..b6ba4fbb18126cb95f70080ada08596bd01d6625 100644
--- a/official/cv/Arcface/eval_ijbc_onnx.py
+++ b/official/cv/Arcface/eval_ijbc_onnx.py
@@ -148,16 +148,16 @@ def divideIntoNstrand(listTemp, n):
 
 def read_template_media_list(path):
     ijb_meta = pd.read_csv(path, sep=' ', header=None).values
-    templates = ijb_meta[:, 1].astype(np.int)
-    media = ijb_meta[:, 2].astype(np.int)
+    templates = ijb_meta[:, 1].astype(np.int_)
+    media = ijb_meta[:, 2].astype(np.int_)
     return templates, media
 
 
 def read_template_pair_list(path):
     pairs = pd.read_csv(path, sep=' ', header=None).values
-    t1 = pairs[:, 0].astype(np.int)
-    t2 = pairs[:, 1].astype(np.int)
-    label = pairs[:, 2].astype(np.int)
+    t1 = pairs[:, 0].astype(np.int_)
+    t2 = pairs[:, 1].astype(np.int_)
+    label = pairs[:, 2].astype(np.int_)
     return t1, t2, label
 
diff --git a/official/cv/CTPN/src/text_connector/connect_text_lines.py b/official/cv/CTPN/src/text_connector/connect_text_lines.py
index 171beca9a7856a5922011a44f050d2e4e3554403..87fa238921b20b980fff8db5ba4d07022f1416c8 100644
--- a/official/cv/CTPN/src/text_connector/connect_text_lines.py
+++ b/official/cv/CTPN/src/text_connector/connect_text_lines.py
@@ -52,7 +52,7 @@ def connect_text_lines(text_proposals, scores, size):
 
     text_lines = clip_boxes(text_lines, size)
 
-    text_recs = np.zeros((len(text_lines), 9), np.float)
+    text_recs = np.zeros((len(text_lines), 9), np.float_)
     index = 0
     for line in text_lines:
         xmin, ymin, xmax, ymax = line[0], line[1], line[2], line[3]
diff --git a/official/cv/CTPN/src/text_connector/detector.py b/official/cv/CTPN/src/text_connector/detector.py
index 7e5d724d440bc0cd8d6eab816bc053b51c30a8b6..707876a6773f837102b034cd02ffd6f0e26d8796 100644
--- a/official/cv/CTPN/src/text_connector/detector.py
+++ b/official/cv/CTPN/src/text_connector/detector.py
@@ -44,9 +44,9 @@ def filter_boxes(boxes):
     Returns:
         boxes(numpy.array): Text boxes after filter.
     """
-    heights = np.zeros((len(boxes), 1), np.float)
-    widths = np.zeros((len(boxes), 1), np.float)
-    scores = np.zeros((len(boxes), 1), np.float)
+    heights = np.zeros((len(boxes), 1), np.float_)
+    widths = np.zeros((len(boxes), 1), np.float_)
+    scores = np.zeros((len(boxes), 1), np.float_)
     index = 0
     for box in boxes:
         widths[index] = abs(box[2] - box[0])
diff --git a/official/cv/Efficientnet/efficientnet-b0/src/transform_utils.py b/official/cv/Efficientnet/efficientnet-b0/src/transform_utils.py
index 4e86cf9fbd7141941f019e3e733f6dcf57612295..ef3092cf9915826fa261a020492d07811b79ff78 100644
--- a/official/cv/Efficientnet/efficientnet-b0/src/transform_utils.py
+++ b/official/cv/Efficientnet/efficientnet-b0/src/transform_utils.py
@@ -421,7 +421,7 @@ def skew(img, v, **__):
         matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])
         matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])
 
-    A = np.matrix(matrix, dtype=np.float)
+    A = np.matrix(matrix, dtype=np.float_)
     B = np.array(original_plane).reshape(8)
     perspective_skew_coefficients_matrix = np.dot(np.linalg.pinv(A), B)
     perspective_skew_coefficients_matrix = np.array(perspective_skew_coefficients_matrix).reshape(8)
diff --git a/official/cv/Efficientnet/efficientnet-b3/infer/util/classification_task_metric.py b/official/cv/Efficientnet/efficientnet-b3/infer/util/classification_task_metric.py
index 9e689cde780b1a8c7722b29656b0f73f7cdcaedd..ce60a7353792ddd6d92f16dd6d86bbd3bf75580b 100644
--- a/official/cv/Efficientnet/efficientnet-b3/infer/util/classification_task_metric.py
+++ b/official/cv/Efficientnet/efficientnet-b3/infer/util/classification_task_metric.py
@@ -55,7 +55,7 @@ def load_statistical_predict_result(filepath):
         data_vec = np.zeros((len(label_list)), dtype=np.float32)
         if n_label != 0:
             for ind, cls_ind in enumerate(label_list):
-                data_vec[ind] = np.int(cls_ind)
+                data_vec[ind] = np.int_(cls_ind)
     return data_vec, n_label
 
diff --git a/official/cv/FasterRCNN/src/detecteval.py b/official/cv/FasterRCNN/src/detecteval.py
index a6766af9712fc1e54b0fe6fadbee1b5b010d6635..63d9b21a33a7ddc38d5a35d4350311ec3f1bccc5 100644
--- a/official/cv/FasterRCNN/src/detecteval.py
+++ b/official/cv/FasterRCNN/src/detecteval.py
@@ -499,8 +499,8 @@ class DetectEval(COCOeval):
                 assert (tps.shape[0]) == 1
                 assert (fps.shape[0]) == 1
-                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
-                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
+                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float_)
+                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float_)
 
                 ids = catIds[k0]
                 label = labels[ids]
diff --git a/official/cv/Inception/inceptionv4/infer/sdk/classification_task_metric.py b/official/cv/Inception/inceptionv4/infer/sdk/classification_task_metric.py
index 09d0184a85bc4df6a82ff33c368bee7c05ab2ccb..4fdf1ec2b3c16a4129b52cc388fe7cadb1a34f2f 100644
--- a/official/cv/Inception/inceptionv4/infer/sdk/classification_task_metric.py
+++ b/official/cv/Inception/inceptionv4/infer/sdk/classification_task_metric.py
@@ -48,7 +48,7 @@ def load_statistical_predict_result(filepath):
         data_vec = np.zeros((n_label), dtype=np.float32)
         if n_label != 0:
             for ind, cls_ind in enumerate(temp):
-                data_vec[ind] = np.int(cls_ind)
+                data_vec[ind] = np.int_(cls_ind)
     return data_vec, n_label
 
diff --git a/official/cv/OCRNet/src/basedataset.py b/official/cv/OCRNet/src/basedataset.py
index 104ee830f131389cf35e7e5b033cbd1df131b8ec..cb676d79336365f1dfe97f1c906640925deee33e 100644
--- a/official/cv/OCRNet/src/basedataset.py
+++ b/official/cv/OCRNet/src/basedataset.py
@@ -89,14 +89,14 @@ class BaseDataset:
 
     def multi_scale_aug(self, image, label=None, rand_scale=1, rand_crop=True):
         """Augment feature into different scales."""
-        long_size = np.int(self.base_size * rand_scale + 0.5)
+        long_size = np.int_(self.base_size * rand_scale + 0.5)
         h, w, _ = image.shape
         if h > w:
             new_h = long_size
-            new_w = np.int(w * long_size / h + 0.5)
+            new_w = np.int_(w * long_size / h + 0.5)
         else:
             new_w = long_size
-            new_h = np.int(h * long_size / w + 0.5)
+            new_h = np.int_(h * long_size / w + 0.5)
         image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
         # image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_NEAREST)
@@ -156,8 +156,8 @@ class BaseDataset:
         batch, _, ori_height, ori_width = image.shape
         assert batch == 1, "only supporting batchsize 1."
         image = image.asnumpy()[0].transpose((1, 2, 0)).copy()
-        stride_h = np.int(self.crop_size[0] * 2.0 / 3.0)
-        stride_w = np.int(self.crop_size[1] * 2.0 / 3.0)
+        stride_h = np.int_(self.crop_size[0] * 2.0 / 3.0)
+        stride_w = np.int_(self.crop_size[1] * 2.0 / 3.0)
         final_pred = Tensor(np.zeros([1, self.num_classes, ori_height, ori_width]), dtype=dtype.float32)
         padvalue = -1.0 * np.array(self.mean) / np.array(self.std)
@@ -178,10 +178,10 @@ class BaseDataset:
                 new_img = self.pad_image(new_img, height, width, self.crop_size, padvalue)
                 new_h, new_w = new_img.shape[:-1]
-                rows = np.int(np.ceil(1.0 * (new_h -
-                                             self.crop_size[0]) / stride_h)) + 1
-                cols = np.int(np.ceil(1.0 * (new_w -
-                                             self.crop_size[1]) / stride_w)) + 1
+                rows = np.int_(np.ceil(1.0 * (new_h -
+                                              self.crop_size[0]) / stride_h)) + 1
+                cols = np.int_(np.ceil(1.0 * (new_w -
+                                              self.crop_size[1]) / stride_w)) + 1
                 preds = Tensor(np.zeros([1, self.num_classes, new_h, new_w]), dtype=dtype.float32)
                 count = Tensor(np.zeros([1, 1, new_h, new_w]), dtype=dtype.float32)
diff --git a/official/cv/OCRNet/src/cityscapes.py b/official/cv/OCRNet/src/cityscapes.py
index 3dbe7a51944fad9b74a09a53a9a6f25a5ddb92a6..18b3a335d3f71c697103130e34ee4a92208fceb3 100644
--- a/official/cv/OCRNet/src/cityscapes.py
+++ b/official/cv/OCRNet/src/cityscapes.py
@@ -118,8 +118,8 @@ class Cityscapes(BaseDataset):
         batch, _, ori_height, ori_width = image.shape
         assert batch == 1, "only supporting batchsize 1."
         image = image.asnumpy()[0].transpose((1, 2, 0)).copy()
-        stride_h = np.int(self.crop_size[0] * 1.0)
-        stride_w = np.int(self.crop_size[1] * 1.0)
+        stride_h = np.int_(self.crop_size[0] * 1.0)
+        stride_w = np.int_(self.crop_size[1] * 1.0)
         final_pred = Tensor(np.zeros([1, self.num_classes, ori_height, ori_width]), dtype=dtype.float32)
 
         for scale in scales:
@@ -137,10 +137,10 @@ class Cityscapes(BaseDataset):
                 preds = preds[:, :, 0:height, 0:width]
             else:
                 new_h, new_w = new_img.shape[:-1]
-                rows = np.int(np.ceil(1.0 * (new_h -
-                                             self.crop_size[0]) / stride_h)) + 1
-                cols = np.int(np.ceil(1.0 * (new_w -
-                                             self.crop_size[1]) / stride_w)) + 1
+                rows = np.int_(np.ceil(1.0 * (new_h -
+                                              self.crop_size[0]) / stride_h)) + 1
+                cols = np.int_(np.ceil(1.0 * (new_w -
+                                              self.crop_size[1]) / stride_w)) + 1
                 preds = np.zeros([1, self.num_classes, new_h, new_w]).astype(np.float32)
                 count = np.zeros([1, 1, new_h, new_w]).astype(np.float32)
diff --git a/official/cv/OCRNet/src/seg_hrnet.py b/official/cv/OCRNet/src/seg_hrnet.py
index 49d3740abfc3d438b5cc02a9ca93224984b8decc..3fbcc063270ef2fc27fac9cd3f6e7e751cdaed9f 100644
--- a/official/cv/OCRNet/src/seg_hrnet.py
+++ b/official/cv/OCRNet/src/seg_hrnet.py
@@ -340,7 +340,7 @@ class HighResolutionNet(nn.Cell):
         self.stage4, pre_stage_channels = self._make_stage(
             self.stage4_cfg, num_channels, multi_scale_output=True)
 
-        last_inp_channels = np.int(np.sum(pre_stage_channels))
+        last_inp_channels = np.int_(np.sum(pre_stage_channels))
 
         self.last_layer = nn.SequentialCell([
diff --git a/official/cv/OCRNet/src/seg_hrnet_ocr.py b/official/cv/OCRNet/src/seg_hrnet_ocr.py
index 6cbd664a71daab6758c18d4cb5e2a5fd7c43859a..275e623f98c8617360c73df338562a880ae25228 100644
--- a/official/cv/OCRNet/src/seg_hrnet_ocr.py
+++ b/official/cv/OCRNet/src/seg_hrnet_ocr.py
@@ -565,7 +565,7 @@ class HighResolutionNet(nn.Cell):
         self.stage4, pre_stage_channels = self._make_stage(
             self.stage4_cfg, num_channels, multi_scale_output=True)
 
-        last_inp_channels = np.int(np.sum(pre_stage_channels))
+        last_inp_channels = np.int_(np.sum(pre_stage_channels))
         ocr_mid_channels = config.model.ocr.mid_channels
         ocr_key_channels = config.model.ocr.key_channels
diff --git a/official/cv/ResNet/infer/ResNet152/sdk/classification_task_metric.py b/official/cv/ResNet/infer/ResNet152/sdk/classification_task_metric.py
index 4c82151c1ad0b0fd8322b8e704e670a871963c27..a830f7d2cfd87c1a4ba6d8e6d79cd514ce49880b 100644
--- a/official/cv/ResNet/infer/ResNet152/sdk/classification_task_metric.py
+++ b/official/cv/ResNet/infer/ResNet152/sdk/classification_task_metric.py
@@ -69,7 +69,7 @@ def load_statistical_predict_result(filepath):
         data_vec = np.zeros((len(temp)), dtype=np.float32)
         if n_label != 0:
             for ind, cls_ind in enumerate(temp):
-                data_vec[ind] = np.int(cls_ind)
+                data_vec[ind] = np.int_(cls_ind)
     return data_vec, n_label
 
diff --git a/official/cv/RetinaFace_ResNet50/eval.py b/official/cv/RetinaFace_ResNet50/eval.py
index 6e79c5f29e3e6ce5c79e9996f35ce97a97a28905..671447b416df84bf450e5b8fb5094dba10cfc730 100644
--- a/official/cv/RetinaFace_ResNet50/eval.py
+++ b/official/cv/RetinaFace_ResNet50/eval.py
@@ -148,8 +148,8 @@ class DetectionEngine:
         keep = self._nms(dets, self.nms_thresh)
         dets = dets[keep, :]
 
-        dets[:, 2:4] = (dets[:, 2:4].astype(np.int) - dets[:, 0:2].astype(np.int)).astype(np.float)  # int
-        dets[:, 0:4] = dets[:, 0:4].astype(np.int).astype(np.float)  # int
+        dets[:, 2:4] = (dets[:, 2:4].astype(np.int_) - dets[:, 0:2].astype(np.int_)).astype(np.float_)  # int
+        dets[:, 0:4] = dets[:, 0:4].astype(np.int_).astype(np.float_)  # int
 
         # add to result
@@ -157,7 +157,7 @@ class DetectionEngine:
         if event_name not in self.results.keys():
             self.results[event_name] = {}
         self.results[event_name][img_name[:-4]] = {'img_path': image_path,
-                                                   'bboxes': dets[:, :5].astype(np.float).tolist()}
+                                                   'bboxes': dets[:, :5].astype(np.float_).tolist()}
 
     def _get_gt_boxes(self):
         from scipy.io import loadmat
@@ -182,7 +182,7 @@ class DetectionEngine:
 
         for event in self.results:
             for name in self.results[event].keys():
-                bbox = np.array(self.results[event][name]['bboxes']).astype(np.float)
+                bbox = np.array(self.results[event][name]['bboxes']).astype(np.float_)
                 if bbox.shape[0] <= 0:
                     continue
                 max_score = max(max_score, np.max(bbox[:, -1]))
@@ -191,7 +191,7 @@ class DetectionEngine:
         length = max_score - min_score
         for event in self.results:
             for name in self.results[event].keys():
-                bbox = np.array(self.results[event][name]['bboxes']).astype(np.float)
+                bbox = np.array(self.results[event][name]['bboxes']).astype(np.float_)
                 if bbox.shape[0] <= 0:
                     continue
                 bbox[:, -1] -= min_score
@@ -227,7 +227,7 @@ class DetectionEngine:
 
-        image_pr = np.zeros((section_num, 2), dtype=np.float)
+        image_pr = np.zeros((section_num, 2), dtype=np.float_)
         for section in range(section_num):
             _thresh = 1 - (section + 1)/section_num
             over_score_index = np.where(predict[:, 4] >= _thresh)[0]
@@ -254,7 +254,7 @@ class DetectionEngine:
         for _set in range(len(sets)):
             gt_list = set_gts[_set]
             count_gt = 0
-            pr_curve = np.zeros((section_num, 2), dtype=np.float)
+            pr_curve = np.zeros((section_num, 2), dtype=np.float_)
             for i, _ in enumerate(event_list):
                 event = str(event_list[i][0][0])
                 image_list = file_list[i][0]
@@ -263,7 +263,7 @@ class DetectionEngine:
                 event_gt_box_list = facebox_list[i][0]
 
                 for j, _ in enumerate(image_list):
-                    predict = np.array(event_predict_dict[str(image_list[j][0][0])]['bboxes']).astype(np.float)
+                    predict = np.array(event_predict_dict[str(image_list[j][0][0])]['bboxes']).astype(np.float_)
                     gt_boxes = event_gt_box_list[j][0].astype('float')
                     keep_index = event_gt_index_list[j][0]
                     count_gt += len(keep_index)
diff --git a/official/cv/Unet/postprocess.py b/official/cv/Unet/postprocess.py
index 67def617c84f07193552924990106ec26b61cdf8..8fd6d2da3c8f111a4501ce8e9b038a09f8ba9302 100644
--- a/official/cv/Unet/postprocess.py
+++ b/official/cv/Unet/postprocess.py
@@ -39,7 +39,7 @@ if __name__ == '__main__':
         mask = cv2.imread(os.path.join(config.data_path, f, "mask.png"), cv2.IMREAD_GRAYSCALE)
         mask = cv2.resize(mask, img_size)
         mask = mask.astype(np.float32) / 255
-        mask = (mask > 0.5).astype(np.int)
+        mask = (mask > 0.5).astype(np.int_)
         mask = (np.arange(2) == mask[..., None]).astype(int)
         mask = mask.transpose(2, 0, 1).astype(np.float32)
         label = mask.reshape(1, 2, 96, 96)
diff --git a/official/cv/Unet/src/data_loader.py b/official/cv/Unet/src/data_loader.py
index f2fc3eb15e81f670d24a3e4006ba16b63d98398c..e90863f6622a4866c393464a046a1d7b193ef98b 100644
--- a/official/cv/Unet/src/data_loader.py
+++ b/official/cv/Unet/src/data_loader.py
@@ -32,7 +32,7 @@ def _load_multipage_tiff(path):
 def _get_val_train_indices(length, fold, ratio=0.8):
     assert 0 < ratio <= 1, "Train/total data ratio must be in range (0.0, 1.0]"
     np.random.seed(0)
-    indices = np.arange(0, length, 1, dtype=np.int)
+    indices = np.arange(0, length, 1, dtype=np.int_)
     np.random.shuffle(indices)
 
     if fold is not None:
@@ -49,7 +49,7 @@ def _get_val_train_indices(length, fold, ratio=0.8):
 
 def data_post_process(img, mask):
     img = np.expand_dims(img, axis=0)
-    mask = (mask > 0.5).astype(np.int)
+    mask = (mask > 0.5).astype(np.int_)
     mask = (np.arange(mask.max() + 1) == mask[..., None]).astype(int)
     mask = mask.transpose(2, 0, 1).astype(np.float32)
     return img, mask
@@ -238,9 +238,9 @@ def preprocess_img_mask(img, mask, num_classes, img_size, augment=False, eval_resize=False):
     img = img.transpose(2, 0, 1)
     if num_classes == 2:
         mask = mask.astype(np.float32) / mask.max()
-        mask = (mask > 0.5).astype(np.int)
+        mask = (mask > 0.5).astype(np.int_)
     else:
-        mask = mask.astype(np.int)
+        mask = mask.astype(np.int_)
     mask = (np.arange(num_classes) == mask[..., None]).astype(int)
     mask = mask.transpose(2, 0, 1).astype(np.float32)
     return img, mask
diff --git a/official/cv/VIT/src/autoaugment.py b/official/cv/VIT/src/autoaugment.py
index 737e1945a8551c3cd99f8886dce7f20ee02555e1..fc5426f4f29152527f94982bda53df44ea11265c 100644
--- a/official/cv/VIT/src/autoaugment.py
+++ b/official/cv/VIT/src/autoaugment.py
@@ -207,7 +207,7 @@ class SubPolicy():
             "translateY": np.linspace(0, 150 / 331, 10),
             "rotate": np.linspace(0, 30, 10),
             "color": np.linspace(0.0, 0.9, 10),
-            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
+            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int_),
             "solarize": np.linspace(256, 0, 10),
             "contrast": np.linspace(0.0, 0.9, 10),
             "sharpness": np.linspace(0.0, 0.9, 10),
diff --git a/official/cv/YOLOv4/src/coco_visual.py b/official/cv/YOLOv4/src/coco_visual.py
index ea9459295092e0128612dde1c6398a0ef588bf33..a15a70ee2ff2739a1287c8426d141eb5d0217904 100644
--- a/official/cv/YOLOv4/src/coco_visual.py
+++ b/official/cv/YOLOv4/src/coco_visual.py
@@ -551,8 +551,8 @@ class DetectEval(COCOeval):
                 assert (tps.shape[0]) == 1
                 assert (fps.shape[0]) == 1
-                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)
-                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)
+                tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float_)
+                fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float_)
 
                 ids = catIds[k0]
                 label = labels[ids]
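Background for the patch above: NumPy 1.20 deprecated the `np.int` / `np.float` aliases for the Python builtins, and NumPy 1.24 removed them, so every `astype(np.int)` or `dtype=np.float` call touched here raises `AttributeError` on current NumPy. A minimal standalone sketch of the breakage and of the replacement spellings used throughout the patch (illustrative only, not code from this repository; assumes NumPy >= 1.24):

```python
import numpy as np

a = np.array(["1", "2", "3"])

try:
    a.astype(np.int)  # the removed alias: raises on NumPy >= 1.24
except AttributeError as err:
    print(err)  # e.g. "module 'numpy' has no attribute 'int'"

b = a.astype(np.int_)    # np.int_ is NumPy's default integer scalar type
c = b.astype(np.float_)  # np.float_ aliases np.float64; note NumPy 2.0 later
                         # removed np.float_ too, so np.float64 is the
                         # future-proof spelling
print(b.dtype, c.dtype)  # e.g. int64 float64
```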