Module sagemaker_defect_detection.utils.coco_eval

BSD 3-Clause License

Copyright (c) Soumith Chintala 2016, All rights reserved.

Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

  • Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.

  • Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.

  • Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

This module is a modified version of https://github.com/pytorch/vision/tree/03b1d38ba3c67703e648fb067570eeb1a1e61265/references/detection

View Source
"""

BSD 3-Clause License

Copyright (c) Soumith Chintala 2016,

All rights reserved.

Redistribution and use in source and binary forms, with or without

modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this

  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,

  this list of conditions and the following disclaimer in the documentation

  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its

  contributors may be used to endorse or promote products derived from

  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"

AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE

IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE

DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE

FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL

DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR

SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER

CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,

OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE

OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

This module is a modified version of https://github.com/pytorch/vision/tree/03b1d38ba3c67703e648fb067570eeb1a1e61265/references/detection

"""

import json

import numpy as np

import copy

import torch

import pickle

import torch.distributed as dist

from pycocotools.cocoeval import COCOeval

from pycocotools.coco import COCO

import pycocotools.mask as mask_util

from collections import defaultdict

def is_dist_avail_and_initialized():

    if not dist.is_available():

        return False

    if not dist.is_initialized():

        return False

    return True

def get_world_size():

    if not is_dist_avail_and_initialized():

        return 1

    return dist.get_world_size()

def all_gather(data):

    """

    Run all_gather on arbitrary picklable data (not necessarily tensors)

    Args:

        data: any picklable object

    Returns:

        list[data]: list of data gathered from each rank

    """

    world_size = get_world_size()

    if world_size == 1:

        return [data]

    # serialized to a Tensor

    buffer = pickle.dumps(data)

    storage = torch.ByteStorage.from_buffer(buffer)

    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank

    local_size = torch.tensor([tensor.numel()], device="cuda")

    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]

    dist.all_gather(size_list, local_size)

    size_list = [int(size.item()) for size in size_list]

    max_size = max(size_list)

    # receiving Tensor from all ranks

    # we pad the tensor because torch all_gather does not support

    # gathering tensors of different shapes

    tensor_list = []

    for _ in size_list:

        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))

    if local_size != max_size:

        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")

        tensor = torch.cat((tensor, padding), dim=0)

    dist.all_gather(tensor_list, tensor)

    data_list = []

    for size, tensor in zip(size_list, tensor_list):

        buffer = tensor.cpu().numpy().tobytes()[:size]

        data_list.append(pickle.loads(buffer))

    return data_list

class CocoEvaluator(object):
    # wraps one pycocotools COCOeval per requested IoU type and accumulates
    # per-image results across update() calls (and, if needed, across ranks)

    def __init__(self, coco_gt, iou_types):

        assert isinstance(iou_types, (list, tuple))

        coco_gt = copy.deepcopy(coco_gt)

        self.coco_gt = coco_gt

        self.iou_types = iou_types

        self.coco_eval = {}

        for iou_type in iou_types:

            self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)

        self.img_ids = []

        self.eval_imgs = {k: [] for k in iou_types}

    def update(self, predictions):

        img_ids = list(np.unique(list(predictions.keys())))

        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:

            results = self.prepare(predictions, iou_type)

            coco_dt = loadRes(self.coco_gt, results) if results else COCO()

            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt

            coco_eval.params.imgIds = list(img_ids)

            img_ids, eval_imgs = evaluate(coco_eval)

            if isinstance(self.eval_imgs[iou_type], np.ndarray):

                self.eval_imgs[iou_type] = self.eval_imgs[iou_type].tolist()

            self.eval_imgs[iou_type].append(eval_imgs)

    def synchronize_between_processes(self):

        for iou_type in self.iou_types:

            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)

            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

    def accumulate(self):

        for coco_eval in self.coco_eval.values():

            coco_eval.accumulate()

    def summarize(self):

        for iou_type, coco_eval in self.coco_eval.items():

            print("IoU metric: {}".format(iou_type))

            coco_eval.summarize()

    def prepare(self, predictions, iou_type):
        # this trimmed version only prepares box detections, regardless of iou_type
        return self.prepare_for_coco_detection(predictions)

    def prepare_for_coco_detection(self, predictions):

        coco_results = []

        for original_id, prediction in predictions.items():

            if len(prediction) == 0:

                continue

            boxes = prediction["boxes"]

            boxes = convert_to_xywh(boxes).tolist()

            scores = prediction["scores"].tolist()

            labels = prediction["labels"].tolist()

            coco_results.extend(

                [

                    {

                        "image_id": original_id,

                        "category_id": labels[k],

                        "bbox": box,

                        "score": scores[k],

                    }

                    for k, box in enumerate(boxes)

                ]

            )

        return coco_results

def convert_to_xywh(boxes):
    # convert [xmin, ymin, xmax, ymax] corner boxes to COCO's [x, y, width, height]

    xmin, ymin, xmax, ymax = boxes.unbind(1)

    return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)

def merge(img_ids, eval_imgs):
    # gather image ids and per-image eval results from all ranks,
    # then keep a single, sorted copy of each image

    all_img_ids = all_gather(img_ids)

    all_eval_imgs = all_gather(eval_imgs)

    merged_img_ids = []

    for p in all_img_ids:

        merged_img_ids.extend(p)

    merged_eval_imgs = []

    for p in all_eval_imgs:

        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)

    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and in sorted order) images

    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)

    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs

def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    # install the merged cross-rank evaluation state into a COCOeval object

    img_ids, eval_imgs = merge(img_ids, eval_imgs)

    img_ids = list(img_ids)

    eval_imgs = list(eval_imgs.flatten())

    coco_eval.evalImgs = eval_imgs

    coco_eval.params.imgIds = img_ids

    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)

#################################################################

# From pycocotools, just removed the prints and fixed

# a Python3 bug about unicode not defined

#################################################################

# Ideally, pycocotools wouldn't have hard-coded prints

# so that we could avoid copy-pasting those two functions

def createIndex(self):

    # create index

    # print('creating index...')

    anns, cats, imgs = {}, {}, {}

    imgToAnns, catToImgs = defaultdict(list), defaultdict(list)

    if "annotations" in self.dataset:

        for ann in self.dataset["annotations"]:

            imgToAnns[ann["image_id"]].append(ann)

            anns[ann["id"]] = ann

    if "images" in self.dataset:

        for img in self.dataset["images"]:

            imgs[img["id"]] = img

    if "categories" in self.dataset:

        for cat in self.dataset["categories"]:

            cats[cat["id"]] = cat

    if "annotations" in self.dataset and "categories" in self.dataset:

        for ann in self.dataset["annotations"]:

            catToImgs[ann["category_id"]].append(ann["image_id"])

    # print('index created!')

    # create class members

    self.anns = anns

    self.imgToAnns = imgToAnns

    self.catToImgs = catToImgs

    self.imgs = imgs

    self.cats = cats

maskUtils = mask_util

def loadRes(self, resFile):

    """

    Load result file and return a result api object.

    :param   resFile (str)     : file name of result file

    :return: res (obj)         : result api object

    """

    res = COCO()

    res.dataset["images"] = [img for img in self.dataset["images"]]

    # print('Loading and preparing results...')

    # tic = time.time()

    if isinstance(resFile, str):  # torch._six was removed in recent PyTorch; a plain str check is equivalent

        anns = json.load(open(resFile))

    elif type(resFile) == np.ndarray:

        anns = self.loadNumpyAnnotations(resFile)

    else:

        anns = resFile

    assert type(anns) == list, "results is not an array of objects"

    annsImgIds = [ann["image_id"] for ann in anns]

    assert set(annsImgIds) == (

        set(annsImgIds) & set(self.getImgIds())

    ), "Results do not correspond to current coco set"

    if "caption" in anns[0]:

        imgIds = set([img["id"] for img in res.dataset["images"]]) & set([ann["image_id"] for ann in anns])

        res.dataset["images"] = [img for img in res.dataset["images"] if img["id"] in imgIds]

        for id, ann in enumerate(anns):

            ann["id"] = id + 1

    elif "bbox" in anns[0] and not anns[0]["bbox"] == []:

        res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])

        for id, ann in enumerate(anns):

            bb = ann["bbox"]

            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]

            if "segmentation" not in ann:

                ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]

            ann["area"] = bb[2] * bb[3]

            ann["id"] = id + 1

            ann["iscrowd"] = 0

    res.dataset["annotations"] = anns

    createIndex(res)

    return res

def evaluate(self):

    """

    Run per-image evaluation on the given images.

    :return: (imgIds, evalImgs); unlike upstream pycocotools, this modified
        version returns the image ids and per-image results instead of
        storing them on self.evalImgs

    """

    # tic = time.time()

    # print('Running per image evaluation...')

    p = self.params

    # add backward compatibility if useSegm is specified in params

    if p.useSegm is not None:

        p.iouType = "segm" if p.useSegm == 1 else "bbox"

        print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))

    # print('Evaluate annotation type *{}*'.format(p.iouType))

    p.imgIds = list(np.unique(p.imgIds))

    if p.useCats:

        p.catIds = list(np.unique(p.catIds))

    p.maxDets = sorted(p.maxDets)

    self.params = p

    self._prepare()

    # loop through images, area range, max detection number

    catIds = p.catIds if p.useCats else [-1]

    if p.iouType == "segm" or p.iouType == "bbox":

        computeIoU = self.computeIoU

    elif p.iouType == "keypoints":

        computeIoU = self.computeOks

    self.ious = {(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds}

    evaluateImg = self.evaluateImg

    maxDet = p.maxDets[-1]

    evalImgs = [

        evaluateImg(imgId, catId, areaRng, maxDet) for catId in catIds for areaRng in p.areaRng for imgId in p.imgIds

    ]

    # this is NOT in the pycocotools code, but could be done outside

    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))

    self._paramsEval = copy.deepcopy(self.params)

    # toc = time.time()

    # print('DONE (t={:0.2f}s).'.format(toc-tic))

    return p.imgIds, evalImgs

#################################################################

# end of straight copy from pycocotools, just removing the prints

#################################################################

Variables

maskUtils

Functions

all_gather

def all_gather(
    data
)

Run all_gather on arbitrary picklable data (not necessarily tensors)

Parameters:

Name Type Description Default
data Any any picklable object required

Returns:

Type Description
list[data] list of data gathered from each rank
View Source
def all_gather(data):

    """

    Run all_gather on arbitrary picklable data (not necessarily tensors)

    Args:

        data: any picklable object

    Returns:

        list[data]: list of data gathered from each rank

    """

    world_size = get_world_size()

    if world_size == 1:

        return [data]

    # serialized to a Tensor

    buffer = pickle.dumps(data)

    storage = torch.ByteStorage.from_buffer(buffer)

    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank

    local_size = torch.tensor([tensor.numel()], device="cuda")

    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]

    dist.all_gather(size_list, local_size)

    size_list = [int(size.item()) for size in size_list]

    max_size = max(size_list)

    # receiving Tensor from all ranks

    # we pad the tensor because torch all_gather does not support

    # gathering tensors of different shapes

    tensor_list = []

    for _ in size_list:

        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))

    if local_size != max_size:

        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")

        tensor = torch.cat((tensor, padding), dim=0)

    dist.all_gather(tensor_list, tensor)

    data_list = []

    for size, tensor in zip(size_list, tensor_list):

        buffer = tensor.cpu().numpy().tobytes()[:size]

        data_list.append(pickle.loads(buffer))

    return data_list
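
A usage sketch, assuming a process group has already been initialized with a CUDA-capable backend such as NCCL (the helper stages its buffers on "cuda"); the payload here is made up:

import torch.distributed as dist

# each rank contributes its own picklable object ...
local_result = {"rank": dist.get_rank(), "num_boxes": 12}

# ... and every rank gets back a list with world_size entries,
# where gathered[i] is the object contributed by rank i
gathered = all_gather(local_result)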

convert_to_xywh

def convert_to_xywh(
    boxes
)
View Source
def convert_to_xywh(boxes):
    # convert [xmin, ymin, xmax, ymax] corner boxes to COCO's [x, y, width, height]

    xmin, ymin, xmax, ymax = boxes.unbind(1)

    return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
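
A minimal, made-up example of the conversion:

import torch

boxes_xyxy = torch.tensor([[10.0, 20.0, 50.0, 80.0]])  # [xmin, ymin, xmax, ymax]
convert_to_xywh(boxes_xyxy)
# tensor([[10., 20., 40., 60.]])  i.e. [x, y, width, height]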

createIndex

def createIndex(
    self
)
View Source
def createIndex(self):

    # create index

    # print('creating index...')

    anns, cats, imgs = {}, {}, {}

    imgToAnns, catToImgs = defaultdict(list), defaultdict(list)

    if "annotations" in self.dataset:

        for ann in self.dataset["annotations"]:

            imgToAnns[ann["image_id"]].append(ann)

            anns[ann["id"]] = ann

    if "images" in self.dataset:

        for img in self.dataset["images"]:

            imgs[img["id"]] = img

    if "categories" in self.dataset:

        for cat in self.dataset["categories"]:

            cats[cat["id"]] = cat

    if "annotations" in self.dataset and "categories" in self.dataset:

        for ann in self.dataset["annotations"]:

            catToImgs[ann["category_id"]].append(ann["image_id"])

    # print('index created!')

    # create class members

    self.anns = anns

    self.imgToAnns = imgToAnns

    self.catToImgs = catToImgs

    self.imgs = imgs

    self.cats = cats

create_common_coco_eval

def create_common_coco_eval(
    coco_eval,
    img_ids,
    eval_imgs
)
View Source
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
    # install the merged cross-rank evaluation state into a COCOeval object

    img_ids, eval_imgs = merge(img_ids, eval_imgs)

    img_ids = list(img_ids)

    eval_imgs = list(eval_imgs.flatten())

    coco_eval.evalImgs = eval_imgs

    coco_eval.params.imgIds = img_ids

    coco_eval._paramsEval = copy.deepcopy(coco_eval.params)

evaluate

def evaluate(
    self
)

Run per-image evaluation on the given images and return the image ids and per-image results

Returns:

Type Description
tuple (imgIds, evalImgs): the evaluated image ids and per-image results
View Source
def evaluate(self):

    """

    Run per-image evaluation on the given images.

    :return: (imgIds, evalImgs); unlike upstream pycocotools, this modified
        version returns the image ids and per-image results instead of
        storing them on self.evalImgs

    """

    # tic = time.time()

    # print('Running per image evaluation...')

    p = self.params

    # add backward compatibility if useSegm is specified in params

    if p.useSegm is not None:

        p.iouType = "segm" if p.useSegm == 1 else "bbox"

        print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))

    # print('Evaluate annotation type *{}*'.format(p.iouType))

    p.imgIds = list(np.unique(p.imgIds))

    if p.useCats:

        p.catIds = list(np.unique(p.catIds))

    p.maxDets = sorted(p.maxDets)

    self.params = p

    self._prepare()

    # loop through images, area range, max detection number

    catIds = p.catIds if p.useCats else [-1]

    if p.iouType == "segm" or p.iouType == "bbox":

        computeIoU = self.computeIoU

    elif p.iouType == "keypoints":

        computeIoU = self.computeOks

    self.ious = {(imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds}

    evaluateImg = self.evaluateImg

    maxDet = p.maxDets[-1]

    evalImgs = [

        evaluateImg(imgId, catId, areaRng, maxDet) for catId in catIds for areaRng in p.areaRng for imgId in p.imgIds

    ]

    # this is NOT in the pycocotools code, but could be done outside

    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))

    self._paramsEval = copy.deepcopy(self.params)

    # toc = time.time()

    # print('DONE (t={:0.2f}s).'.format(toc-tic))

    return p.imgIds, evalImgs

get_world_size

def get_world_size(

)
View Source
def get_world_size():

    if not is_dist_avail_and_initialized():

        return 1

    return dist.get_world_size()

is_dist_avail_and_initialized

def is_dist_avail_and_initialized(

)
View Source
def is_dist_avail_and_initialized():

    if not dist.is_available():

        return False

    if not dist.is_initialized():

        return False

    return True

loadRes

def loadRes(
    self,
    resFile
)

Load result file and return a result api object.

Parameters:

Name Type Description Default
resFile str file name of result file required

Returns:

Type Description
COCO result api object
View Source
def loadRes(self, resFile):

    """

    Load result file and return a result api object.

    :param   resFile (str)     : file name of result file

    :return: res (obj)         : result api object

    """

    res = COCO()

    res.dataset["images"] = [img for img in self.dataset["images"]]

    # print('Loading and preparing results...')

    # tic = time.time()

    if isinstance(resFile, str):  # torch._six was removed in recent PyTorch; a plain str check is equivalent

        anns = json.load(open(resFile))

    elif type(resFile) == np.ndarray:

        anns = self.loadNumpyAnnotations(resFile)

    else:

        anns = resFile

    assert type(anns) == list, "results is not an array of objects"

    annsImgIds = [ann["image_id"] for ann in anns]

    assert set(annsImgIds) == (

        set(annsImgIds) & set(self.getImgIds())

    ), "Results do not correspond to current coco set"

    if "caption" in anns[0]:

        imgIds = set([img["id"] for img in res.dataset["images"]]) & set([ann["image_id"] for ann in anns])

        res.dataset["images"] = [img for img in res.dataset["images"] if img["id"] in imgIds]

        for id, ann in enumerate(anns):

            ann["id"] = id + 1

    elif "bbox" in anns[0] and not anns[0]["bbox"] == []:

        res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])

        for id, ann in enumerate(anns):

            bb = ann["bbox"]

            x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]

            if "segmentation" not in ann:

                ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]

            ann["area"] = bb[2] * bb[3]

            ann["id"] = id + 1

            ann["iscrowd"] = 0

    res.dataset["annotations"] = anns

    createIndex(res)

    return res
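
For reference, a sketch of the in-memory results that loadRes accepts when resFile is a list rather than a file path (ids and values are made up; coco_gt stands for an existing ground-truth COCO object):

results = [
    {
        "image_id": 1,                      # must exist in the ground-truth set
        "category_id": 3,
        "bbox": [10.0, 20.0, 40.0, 60.0],   # [x, y, width, height]
        "score": 0.92,
    },
]
coco_dt = loadRes(coco_gt, results)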

merge

def merge(
    img_ids,
    eval_imgs
)
View Source
def merge(img_ids, eval_imgs):
    # gather image ids and per-image eval results from all ranks,
    # then keep a single, sorted copy of each image

    all_img_ids = all_gather(img_ids)

    all_eval_imgs = all_gather(eval_imgs)

    merged_img_ids = []

    for p in all_img_ids:

        merged_img_ids.extend(p)

    merged_eval_imgs = []

    for p in all_eval_imgs:

        merged_eval_imgs.append(p)

    merged_img_ids = np.array(merged_img_ids)

    merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)

    # keep only unique (and in sorted order) images

    merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)

    merged_eval_imgs = merged_eval_imgs[..., idx]

    return merged_img_ids, merged_eval_imgs

Classes

CocoEvaluator

class CocoEvaluator(
    coco_gt,
    iou_types
)
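
A typical evaluation loop, sketched under the assumption that coco_gt is a pycocotools COCO object for the validation set and that model, data_loader, and targets follow the torchvision detection conventions (the model outputs per-image dicts with "boxes", "scores", and "labels"):

evaluator = CocoEvaluator(coco_gt, iou_types=["bbox"])

model.eval()
with torch.no_grad():
    for images, targets in data_loader:
        outputs = model(images)
        # map each image id to its prediction dict, as update() expects
        predictions = {t["image_id"].item(): o for t, o in zip(targets, outputs)}
        evaluator.update(predictions)

evaluator.synchronize_between_processes()  # merge results across ranks
evaluator.accumulate()
evaluator.summarize()                      # prints the standard COCO AP/AR table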

Methods

accumulate

def accumulate(
    self
)
View Source
    def accumulate(self):

        for coco_eval in self.coco_eval.values():

            coco_eval.accumulate()

prepare

def prepare(
    self,
    predictions,
    iou_type
)
View Source
    def prepare(self, predictions, iou_type):
        # this trimmed version only prepares box detections, regardless of iou_type
        return self.prepare_for_coco_detection(predictions)

prepare_for_coco_detection

def prepare_for_coco_detection(
    self,
    predictions
)
View Source
    def prepare_for_coco_detection(self, predictions):

        coco_results = []

        for original_id, prediction in predictions.items():

            if len(prediction) == 0:

                continue

            boxes = prediction["boxes"]

            boxes = convert_to_xywh(boxes).tolist()

            scores = prediction["scores"].tolist()

            labels = prediction["labels"].tolist()

            coco_results.extend(

                [

                    {

                        "image_id": original_id,

                        "category_id": labels[k],

                        "bbox": box,

                        "score": scores[k],

                    }

                    for k, box in enumerate(boxes)

                ]

            )

        return coco_results
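
To make the expected shapes concrete, a small made-up example of the conversion this method performs:

predictions = {
    42: {  # image_id -> torchvision-style output tensors
        "boxes": torch.tensor([[10.0, 20.0, 50.0, 80.0]]),  # xyxy corners
        "scores": torch.tensor([0.9]),
        "labels": torch.tensor([3]),
    }
}
# yields (up to float precision):
# [{"image_id": 42, "category_id": 3,
#   "bbox": [10.0, 20.0, 40.0, 60.0], "score": 0.9}]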

summarize

def summarize(
    self
)
View Source
    def summarize(self):

        for iou_type, coco_eval in self.coco_eval.items():

            print("IoU metric: {}".format(iou_type))

            coco_eval.summarize()

synchronize_between_processes

def synchronize_between_processes(
    self
)
View Source
    def synchronize_between_processes(self):

        for iou_type in self.iou_types:

            self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)

            create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])

update

def update(
    self,
    predictions
)
View Source
    def update(self, predictions):

        img_ids = list(np.unique(list(predictions.keys())))

        self.img_ids.extend(img_ids)

        for iou_type in self.iou_types:

            results = self.prepare(predictions, iou_type)

            coco_dt = loadRes(self.coco_gt, results) if results else COCO()

            coco_eval = self.coco_eval[iou_type]

            coco_eval.cocoDt = coco_dt

            coco_eval.params.imgIds = list(img_ids)

            img_ids, eval_imgs = evaluate(coco_eval)

            if isinstance(self.eval_imgs[iou_type], np.ndarray):

                self.eval_imgs[iou_type] = self.eval_imgs[iou_type].tolist()

            self.eval_imgs[iou_type].append(eval_imgs)