# Random_anna.mp4 — May 2026
# Detect people in random_anna.mp4 with YOLOv3 (OpenCV DNN) and show each
# frame with bounding boxes. Press 'q' to quit early.
#
# Requires yolov3.weights, yolov3.cfg, coco.names, and random_anna.mp4 in
# the working directory.

import cv2
import numpy as np

# Load the YOLOv3 network and the COCO class labels.
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f]

# Names of the unconnected output layers YOLO predicts from.
# NOTE(review): getUnconnectedOutLayers() returns 1-based indices whose shape
# varies across OpenCV versions — flatten() handles both scalar and nested forms.
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]

video = cv2.VideoCapture('random_anna.mp4')

while video.isOpened():
    ret, frame = video.read()
    if not ret:
        break

    height, width, channels = frame.shape

    # Detecting objects: normalize to [0,1] (1/255 ≈ 0.00392), resize to the
    # 416x416 network input, and swap BGR -> RGB (swapRB=True).
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)

    class_ids = []
    confidences = []
    boxes = []
    for out in outs:
        for detection in out:
            # detection = [cx, cy, w, h, objectness, class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > 0.5 and classes[class_id] == "person":  # Filter by class and confidence
                # Object detected: YOLO outputs are relative to frame size.
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle coordinates (top-left corner).
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # Non-maximum suppression to drop overlapping duplicates, then draw the
    # surviving boxes (the original computed boxes but never used them).
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    for i in np.array(indexes).flatten():
        x, y, w, h = boxes[i]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow("Image", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close the display window.
video.release()
cv2.destroyAllWindows()