Seyfullah Tıkıç / keras-retinanet · Commits

Commit 32ce743c (Unverified)
Authored Nov 07, 2019 by Hans Gaiser; committed by GitHub on Nov 07, 2019

Merge pull request #1162 from SalahAdDin/eval-infere-time

Added inference time measure.

Parents: a7dad0be, a7966a5f
Showing 2 changed files with 15 additions and 5 deletions.
keras_retinanet/bin/evaluate.py   +3  -1
keras_retinanet/utils/eval.py     +12 -4
keras_retinanet/bin/evaluate.py
@@ -152,7 +152,7 @@ def main(args=None):
         from ..utils.coco_eval import evaluate_coco
         evaluate_coco(generator, model, args.score_threshold)
     else:
-        average_precisions = evaluate(
+        average_precisions, inference_time = evaluate(
             generator,
             model,
             iou_threshold=args.iou_threshold,
@@ -174,6 +174,8 @@ def main(args=None):
             print('No test instances found.')
             return
 
+        print('Inference time for {:.0f} images: {:.4f}'.format(generator.size(), inference_time))
+
         print('mAP using the weighted average of precisions among classes: {:.4f}'.format(sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)))
         print('mAP: {:.4f}'.format(sum(precisions) / sum(x > 0 for x in total_instances)))
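The two summary lines above compute different aggregates: the first weights each class's average precision by its annotation count, the second averages only over classes that have at least one instance. A quick worked sketch with made-up numbers (not part of the commit):

# Hypothetical per-class values, purely for illustration.
precisions      = [0.80, 0.40]   # average precision per class
total_instances = [90, 10]       # annotation count per class

# Instance-weighted mAP: (0.80 * 90 + 0.40 * 10) / 100 = 0.76
weighted = sum(a * b for a, b in zip(total_instances, precisions)) / sum(total_instances)

# Plain mAP over classes with instances: (0.80 + 0.40) / 2 = 0.60
plain = sum(precisions) / sum(x > 0 for x in total_instances)

print('{:.4f} vs {:.4f}'.format(weighted, plain))  # prints: 0.7600 vs 0.6000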
keras_retinanet/utils/eval.py
@@ -20,6 +20,7 @@ from .visualization import draw_detections, draw_annotations
 import keras
 import numpy as np
 import os
+import time
 
 import cv2
 import progressbar
@@ -71,6 +72,7 @@ def _get_detections(generator, model, score_threshold=0.05, max_detections=100,
         A list of lists containing the detections for each image in the generator.
     """
     all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]
+    all_inferences = [None for i in range(generator.size())]
 
     for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
         raw_image = generator.load_image(i)
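For orientation, the two lists initialised above are parallel per-image containers; a toy illustration (hypothetical sizes, not from the diff) assuming a generator with three images and two labelled classes:

# all_detections: one slot per labelled class, per image
all_detections = [[None, None], [None, None], [None, None]]
# all_inferences: one wall-clock time per image, filled in as the loop runs
all_inferences = [None, None, None]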
@@ -81,7 +83,9 @@ def _get_detections(generator, model, score_threshold=0.05, max_detections=100,
             image = image.transpose((2, 0, 1))
 
         # run network
+        start = time.time()
         boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]
+        inference_time = time.time() - start
 
         # correct boxes for image scale
         boxes /= scale
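The added timing brackets only the forward pass. A self-contained sketch of the same pattern (the toy model below is an assumption purely for illustration; note that time.time() measures wall-clock seconds, so the figure includes framework overhead as well as compute):

import time
import numpy as np
import keras

# Toy stand-in for the detection model; any Keras model works here.
model = keras.Sequential([keras.layers.Dense(4, input_shape=(8,))])
x = np.random.rand(1, 8).astype('float32')

start = time.time()
_ = model.predict_on_batch(x)           # same call pattern as in the diff
inference_time = time.time() - start    # wall-clock seconds for one batch
print('one forward pass took {:.4f}s'.format(inference_time))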
@@ -113,8 +117,10 @@ def _get_detections(generator, model, score_threshold=0.05, max_detections=100,
             continue
 
         all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]
+        all_inferences[i] = inference_time
 
-    return all_detections
+    return all_detections, all_inferences
 
 
 def _get_annotations(generator):
@@ -165,7 +171,7 @@ def evaluate(
         A dict mapping class names to mAP scores.
     """
     # gather all detections and annotations
-    all_detections     = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
+    all_detections, all_inferences = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections, save_path=save_path)
     all_annotations    = _get_annotations(generator)
     average_precisions = {}
@@ -232,4 +238,6 @@ def evaluate(
         average_precision  = _compute_ap(recall, precision)
         average_precisions[label] = average_precision, num_annotations
 
-    return average_precisions
+    inference_time = np.sum(all_inferences) / generator.size()
+
+    return average_precisions, inference_time
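Because evaluate() now returns a tuple, callers that previously unpacked a single dict must be updated (the bin/evaluate.py change above does exactly that). A minimal usage sketch, where generator and model stand in for a real data generator and a trained prediction model:

from keras_retinanet.utils.eval import evaluate

# generator and model are assumed to exist, as in keras_retinanet/bin/evaluate.py.
average_precisions, inference_time = evaluate(generator, model)
print('mean inference time per image: {:.4f}s'.format(inference_time))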