Skip to content

Commit c90a465

Browse files
committed
Fix a shift in the bounding box of predictions
1 parent 2eb5422 commit c90a465

3 files changed

Lines changed: 8 additions & 14 deletions

File tree

docker/dockerfile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ RUN pip install -r requirements.txt
3131
WORKDIR ${HOME}
3232
#Checkout version should be 4.4.0 when the new version is released
3333
RUN git clone http://github.com/opencv/opencv.git && cd opencv \
34-
&& git checkout 992c908b566d264b824680d0cf7d668cdf918254 \
34+
&& git checkout 4.4.0 \
3535
&& mkdir build && cd build \
3636
&& cmake -D CMAKE_BUILD_TYPE=RELEASE \
3737
-D CMAKE_INSTALL_PREFIX=/usr/local \

src/main/inference/inference_engines_factory.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@ def get_engine(path_to_model):
2727
# import one of the available inference engine class (in this project there's only one), and return a
2828
# model instance
2929
if inference_engine_name=='yolov3_opencv_cpu_detection' or inference_engine_name=='yolov4_opencv_cpu_detection':
30-
return getattr(__import__("yolov3_opencv_cpu_detection"), 'InferenceEngine')(path_to_model)
30+
return getattr(__import__("yolo_opencv_cpu_detection"), 'InferenceEngine')(path_to_model)
3131
except ApplicationError as e:
3232
raise e
3333
except Exception as e:

src/main/inference/yolov3_opencv_cpu_detection.py renamed to src/main/inference/yolo_opencv_cpu_detection.py

Lines changed: 6 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@ def load(self):
3636
with open(os.path.join(self.model_path, 'obj.names'), 'r') as f:
3737
self.labels = [line.strip() for line in f.readlines()]
3838
self.net = cv2.dnn.readNet(os.path.join(self.model_path, 'yolo-obj.cfg'),
39-
os.path.join(self.model_path, 'yolo-obj.weights'))
39+
os.path.join(self.model_path, 'yolo-obj.weights'),'darknet')
4040

4141
async def infer(self, input_data, draw, predict_batch):
4242
await asyncio.sleep(0.00001)
@@ -55,10 +55,10 @@ async def infer(self, input_data, draw, predict_batch):
5555
height, width, depth = np_image.shape
5656
# create input blob
5757
blob = cv2.dnn.blobFromImage(
58-
np_image, self.scale, (self.image_width, self.image_height), (self.R_mean, self.G_mean, self.B_mean),
59-
self.swapRB, self.crop)
58+
np_image, size=(self.image_width, self.image_height), swapRB=self.swapRB, ddepth=cv2.CV_8U)
59+
6060
# feed the blob to the network
61-
self.net.setInput(blob)
61+
self.net.setInput(blob, scalefactor=self.scale, mean=[self.R_mean,self.G_mean,self.B_mean])
6262
# get the output layers
6363
output_layers = self.net.forward(self.__get_output_layers__())
6464
# for each detection from each output layer
@@ -87,13 +87,7 @@ async def infer(self, input_data, draw, predict_batch):
8787
remaining_indices = cv2.dnn.NMSBoxes(
8888
boxes, confidences, conf_threshold, nms_threshold)
8989

90-
for i in range(len(boxes)):
91-
# i = i[0]
92-
box = boxes[i]
93-
x = box[0]
94-
y = box[1]
95-
w = box[2]
96-
h = box[3]
90+
9791

9892
# release resources
9993
cv2.destroyAllWindows()
@@ -115,7 +109,7 @@ async def infer(self, input_data, draw, predict_batch):
115109

116110
if (left < 0):
117111
left = 0
118-
if (right > height- 1):
112+
if (right > width- 1):
119113
right = width - 1
120114
if (top < 0):
121115
top = 0

0 commit comments

Comments
 (0)