Visualizing Results

Draw bounding boxes on images to highlight detected disease areas.

Bounding Boxes

Request detections by setting include_bbox=true. Coordinates are normalized to the 0-1 range in the format [x, y, width, height], where (x, y) is the top-left corner of the box.
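For reference, a minimal request sketch with Python requests, using the same endpoint and form fields as the examples later on this page:

import requests

# Send an image and ask for bounding boxes in the response
with open("plant.jpg", "rb") as f:
    response = requests.post(
        "https://api.tajirifarm.com/diagnoses/",
        files={"image": f},
        data={"include_bbox": "true"},
    )
result = response.json()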


Coordinate System

┌─────────────────────────┐
│ (0,0)                   │
│     ┌───────┐           │
│     │ bbox  │           │
│     │       │           │
│     └───────┘           │
│                   (1,1) │
└─────────────────────────┘

bbox = [x, y, width, height]
     = [0.15, 0.30, 0.25, 0.20]

To convert to pixels:
  pixel_x = x * image_width
  pixel_y = y * image_height
  pixel_w = width * image_width
  pixel_h = height * image_height
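A small helper that applies this conversion (a sketch; to_pixels is an illustrative name, not part of the API client):

def to_pixels(bbox: list, img_w: int, img_h: int) -> tuple:
    """Convert a normalized [x, y, width, height] box to pixel corners."""
    x, y, w, h = bbox
    left = int(x * img_w)
    top = int(y * img_h)
    right = int((x + w) * img_w)
    bottom = int((y + h) * img_h)
    return left, top, right, bottom


# For the example box above on a 1200x800 image:
# to_pixels([0.15, 0.30, 0.25, 0.20], 1200, 800) -> (180, 240, 480, 400)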

Drawing Bounding Boxes

from PIL import Image, ImageDraw


def draw_detections(
    image_path: str,
    detections: list,
    output_path: str | None = None
) -> Image.Image:
    """
    Draw bounding boxes on plant image.

    Args:
        image_path: Path to original image
        detections: List of detection objects from API response
        output_path: Optional path to save annotated image

    Returns:
        PIL Image with drawn boxes
    """
    img = Image.open(image_path)
    draw = ImageDraw.Draw(img)
    img_w, img_h = img.size

    # Color based on confidence
    def get_color(confidence: float) -> str:
        if confidence >= 0.8:
            return "#FF0000"  # Red - high confidence
        elif confidence >= 0.5:
            return "#FFA500"  # Orange - moderate
        return "#FFFF00"      # Yellow - low

    for det in detections:
        x, y, w, h = det["bbox"]

        # Convert normalized to pixel coordinates
        left = int(x * img_w)
        top = int(y * img_h)
        right = int((x + w) * img_w)
        bottom = int((y + h) * img_h)

        color = get_color(det["confidence"])

        # Draw rectangle
        draw.rectangle([left, top, right, bottom], outline=color, width=3)

        # Draw label; clamp so it stays inside the frame for boxes near the top
        label = f"{det['name']} ({det['confidence']:.0%})"
        draw.text((left, max(0, top - 20)), label, fill=color)

    if output_path:
        img.save(output_path)

    return img


# Usage ('client' is the API client configured earlier in these docs)
result = client.diagnose("plant.jpg", include_bbox=True)

if result.get("detections"):
    annotated = draw_detections(
        "plant.jpg",
        result["detections"],
        "plant_annotated.jpg"
    )
    annotated.show()
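Pillow's default bitmap font is small. For more readable labels, load a TrueType font and pass it to draw.text via font=. A sketch, assuming a .ttf file is available on your system:

from PIL import ImageFont

try:
    # The font file name is an assumption; point truetype() at any .ttf
    # installed on your system
    font = ImageFont.truetype("DejaVuSans.ttf", 18)
except OSError:
    font = ImageFont.load_default()

# Then inside draw_detections:
#   draw.text((left, max(0, top - 20)), label, fill=color, font=font)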
OpenCV

The same annotation using OpenCV instead of Pillow.

import cv2
import numpy as np


def draw_detections_cv2(
    image_path: str,
    detections: list,
    output_path: str | None = None
) -> np.ndarray:
    """Draw bounding boxes using OpenCV."""
    img = cv2.imread(image_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    img_h, img_w = img.shape[:2]

    # Colors (BGR format)
    colors = {
        "high": (0, 0, 255),    # Red
        "medium": (0, 165, 255), # Orange
        "low": (0, 255, 255)     # Yellow
    }

    for det in detections:
        x, y, w, h = det["bbox"]

        # Convert to pixel coordinates
        x1 = int(x * img_w)
        y1 = int(y * img_h)
        x2 = int((x + w) * img_w)
        y2 = int((y + h) * img_h)

        # Select color based on confidence
        conf = det["confidence"]
        color = colors["high"] if conf >= 0.8 else colors["medium"] if conf >= 0.5 else colors["low"]

        # Draw rectangle
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)

        # Draw label with a filled background; clamp so it stays inside the
        # frame for boxes near the top edge
        label = f"{det['name']} {conf:.0%}"
        (label_w, label_h), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        y_text = max(y1, label_h + 10)
        cv2.rectangle(img, (x1, y_text - label_h - 10), (x1 + label_w, y_text), color, -1)
        cv2.putText(img, label, (x1, y_text - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)

    if output_path:
        cv2.imwrite(output_path, img)

    return img


# Usage
annotated = draw_detections_cv2("plant.jpg", result["detections"])
cv2.imshow("Detections", annotated)
cv2.waitKey(0)
cv2.destroyAllWindows()
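Note that cv2.imshow opens a GUI window and requires a display; on a headless server, pass an output_path and open the saved file instead.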
Browser Canvas

Draw detections onto an HTML canvas element in the browser.

function drawDetections(imageElement, detections, canvas) {
  const ctx = canvas.getContext('2d');

  // Set canvas size to match image
  canvas.width = imageElement.naturalWidth;
  canvas.height = imageElement.naturalHeight;

  // Draw the image
  ctx.drawImage(imageElement, 0, 0);

  // Color based on confidence
  function getColor(confidence) {
    if (confidence >= 0.8) return '#FF0000';
    if (confidence >= 0.5) return '#FFA500';
    return '#FFFF00';
  }

  detections.forEach(det => {
    const [x, y, w, h] = det.bbox;

    // Convert to pixel coordinates
    const pixelX = x * canvas.width;
    const pixelY = y * canvas.height;
    const pixelW = w * canvas.width;
    const pixelH = h * canvas.height;

    const color = getColor(det.confidence);

    // Draw rectangle
    ctx.strokeStyle = color;
    ctx.lineWidth = 3;
    ctx.strokeRect(pixelX, pixelY, pixelW, pixelH);

    // Draw label
    const label = `${det.name} (${(det.confidence * 100).toFixed(0)}%)`;
    ctx.fillStyle = color;
    ctx.font = '16px Inter, sans-serif';
    ctx.fillText(label, pixelX, pixelY - 5);
  });
}

// Usage
const img = document.getElementById('plantImage');
const canvas = document.getElementById('annotationCanvas');

// Build the multipart body; 'imageInput' is an illustrative element id --
// use whatever file input your page provides
const fileInput = document.getElementById('imageInput');
const formData = new FormData();
formData.append('image', fileInput.files[0]);
formData.append('include_bbox', 'true');

fetch('https://api.tajirifarm.com/diagnoses/', {
  method: 'POST',
  body: formData
})
  .then(res => res.json())
  .then(result => {
    if (result.detections) {
      drawDetections(img, result.detections, canvas);
    }
  });
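drawDetections reads imageElement.naturalWidth, so the image must be fully loaded before you call it; the React component below guards against this with an image.complete check.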

React Component

Complete React component for image annotation.

import { useRef, useEffect } from 'react';

interface Detection {
  name: string;
  bbox: [number, number, number, number];
  confidence: number;
}

interface AnnotatedImageProps {
  src: string;
  detections: Detection[];
  className?: string;
}

export function AnnotatedImage({ src, detections, className }: AnnotatedImageProps) {
  const canvasRef = useRef<HTMLCanvasElement>(null);
  const imageRef = useRef<HTMLImageElement>(null);

  useEffect(() => {
    const canvas = canvasRef.current;
    const image = imageRef.current;

    if (!canvas || !image || !detections.length) return;

    const ctx = canvas.getContext('2d');
    if (!ctx) return;

    const draw = () => {
      canvas.width = image.naturalWidth;
      canvas.height = image.naturalHeight;

      ctx.drawImage(image, 0, 0);

      detections.forEach(det => {
        const [x, y, w, h] = det.bbox;
        const px = x * canvas.width;
        const py = y * canvas.height;
        const pw = w * canvas.width;
        const ph = h * canvas.height;

        // Color by confidence
        const color = det.confidence >= 0.8 ? '#FF0000' :
                      det.confidence >= 0.5 ? '#FFA500' : '#FFFF00';

        // Box
        ctx.strokeStyle = color;
        ctx.lineWidth = 3;
        ctx.strokeRect(px, py, pw, ph);

        // Label background
        const label = `${det.name} ${(det.confidence * 100).toFixed(0)}%`;
        ctx.font = '14px Inter, sans-serif';
        const labelWidth = ctx.measureText(label).width + 8;

        ctx.fillStyle = color;
        ctx.fillRect(px, py - 22, labelWidth, 20);

        ctx.fillStyle = '#FFFFFF';
        ctx.fillText(label, px + 4, py - 7);
      });
    };

    if (image.complete) {
      draw();
    } else {
      image.onload = draw;
    }
  }, [src, detections]);

  return (
    <div className={className} style={{ position: 'relative' }}>
      <img
        ref={imageRef}
        src={src}
        alt="Original"
        style={{ display: 'none' }}
      />
      <canvas ref={canvasRef} style={{ maxWidth: '100%' }} />
    </div>
  );
}

// Usage
<AnnotatedImage
  src="/images/plant.jpg"
  detections={result.detections}
  className="diagnosis-image"
/>
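Because the canvas is drawn at the image's natural resolution and only scaled down with CSS (maxWidth: '100%'), the boxes stay aligned with the underlying image at any display size.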

Streamlit Integration

For data science applications using Streamlit.

import streamlit as st
from PIL import Image, ImageDraw
import requests


def draw_boxes_streamlit(image: Image.Image, detections: list) -> Image.Image:
    """Draw boxes and return annotated image for Streamlit."""

    img = image.copy()
    draw = ImageDraw.Draw(img)
    img_w, img_h = img.size

    for det in detections:
        x, y, w, h = det["bbox"]
        left, top = int(x * img_w), int(y * img_h)
        right, bottom = int((x + w) * img_w), int((y + h) * img_h)

        color = "#FF0000" if det["confidence"] >= 0.8 else "#FFA500"
        draw.rectangle([left, top, right, bottom], outline=color, width=3)
        draw.text((left, top - 15), f"{det['name']} {det['confidence']:.0%}", fill=color)

    return img


# Streamlit app
st.title("Plant Disease Diagnosis")

uploaded_file = st.file_uploader("Upload plant image", type=["jpg", "png", "webp"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image")

    if st.button("Diagnose"):
        with st.spinner("Analyzing..."):
            # Call API
            uploaded_file.seek(0)
            response = requests.post(
                "https://api.tajirifarm.com/diagnoses/",
                files={"image": uploaded_file},
                data={"include_bbox": "true"}
            )
            response.raise_for_status()
            result = response.json()

        # Display results
        st.subheader(f"Health: {result['crop_health']}")

        for diag in result["diagnoses"]:
            st.write(f"**{diag['name']}** - {diag['confidence']:.0%}")

        # Show annotated image
        if result.get("detections"):
            annotated = draw_boxes_streamlit(image, result["detections"])
            st.image(annotated, caption="Detected Areas")
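Run the app with streamlit run app.py (assuming the code above is saved as app.py).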