Btw, I'm Trying To Learn

Btw, I'm trying to learn 🙂

I also created this:

import os
import sys
import torch
import torch.onnx
import traceback
from pathlib import Path
import json

# Third-party library imports
import onnx
from onnxsim import simplify
from clearml import Task, OutputModel
from cvat_sdk import make_client
from cvat_sdk.pytorch import ProjectVisionDataset, ExtractBoundingBoxes
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import RTDetrForObjectDetection, RTDetrImageProcessor
from transformers.onnx import export, OnnxConfig

# --- CLEARML INTEGRATION ---
task = Task.init(project_name="Frigate", task_name="RT-DETR Finetuning CVAT")
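# Task.init starts experiment tracking: console output, reported scalars and
# registered models all appear under this task in the ClearML web UI.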

# --- CONFIGURATION ---
CVAT_HOST = ""
CVAT_USER = os.getenv("CVAT_USER", "")
CVAT_PASS = os.getenv("CVAT_PASS", "")
PROJECT_ID = 2

if not all([CVAT_USER, CVAT_PASS]):
    raise ValueError("The CVAT_USER and CVAT_PASS environment variables must be set.")

# --- CLEARML HYPERPARAMETERS ---
hyperparameters = {
    'MODEL_CHECKPOINT': "PekingU/rtdetr_r50vd_coco_o365",
    'NUM_EPOCHS': 10,
    'BATCH_SIZE': 4,
    'LEARNING_RATE': 1e-5,
    'width': 640,
    'height': 640
}
# task.connect() already registers these under the "General" section and lets
# the ClearML UI override them on remote runs, so a separate
# set_parameters_as_dict() call would only duplicate them.
task.connect(hyperparameters)

# --- ONNX EXPORT (unchanged) ---
class RTDetrOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        # Dynamic axes: batch size and image height/width may vary at inference time.
        return {"pixel_values": {0: "batch", 2: "height", 3: "width"}}


def export_to_onnx_robust(model, image_processor, output_path, device):
    print("\nStarting robust export to ONNX format...")
    model.eval()
    model.to(device)
    onnx_config = RTDetrOnnxConfig(model.config)
    onnx_model_path = os.path.join(output_path, "model.onnx")
    try:
        export(preprocessor=image_processor, model=model, config=onnx_config, output=Path(onnx_model_path), opset=16)
        print(f"ONNX exported: {onnx_model_path}")
        model_onnx = onnx.load(onnx_model_path)
        onnx.checker.check_model(model_onnx)
        print("The ONNX model is valid!")
        simplified_model, check = simplify(model_onnx)
        if check:
            optimized_path = os.path.join(output_path, "model-simplified.onnx")
            onnx.save(simplified_model, optimized_path)
            print(f"Simplified ONNX saved: {optimized_path}")
            return optimized_path
        else:
            print("ERROR: Could not simplify the ONNX model.")
            return None
    except Exception as e:
        print(f"ERROR during robust ONNX export: {e}")
        traceback.print_exc()
        return None

# --- MAIN TRAINING FUNCTION WITH DEBUGGING ---
def train_model():
    print("Kobler til CVAT...")
    with make_client(host=CVAT_HOST, credentials=(CVAT_USER, CVAT_PASS)) as client:

        print(f"Henter labels dynamisk fra CVAT prosjekt ID: {PROJECT_ID}...")
        project = client.projects.retrieve(PROJECT_ID)
        cvat_labels = sorted(project.get_labels(), key=lambda label: label.id)

        label_map_for_cvat = {label.name: i for i, label in enumerate(cvat_labels)}
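        # Maps CVAT label names to contiguous indices (sorted by label id);
        # passed as label_name_to_index to ProjectVisionDataset below.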

        print("\n" + "="*80)
        print("DEBUG: Dette er den 'label_map' vi sender til CVAT-SDK:")
        print(json.dumps(label_map_for_cvat, indent=2))
        print("="*80 + "\n")

        id2label_normalized = {i: label.name.lower() for i, label in enumerate(cvat_labels)}
        label2id_normalized = {name: i for i, name in id2label_normalized.items()}
        task.connect_label_enumeration(label2id_normalized)
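        # The enumeration is stored with the task, so output models registered
        # on it later pick up this class-name -> id mapping.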
        print(":white_check_mark: Labels registrert i ClearML (normalisert).")

        print("\nLaster datasett fra CVAT...")
        image_processor = RTDetrImageProcessor.from_pretrained(hyperparameters['MODEL_CHECKPOINT'])
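        # ExtractBoundingBoxes yields targets as {'boxes': Tensor[N, 4] (xyxy), 'labels': Tensor[N]};
        # collate_fn below converts these to COCO-style annotations.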
        target_transform = ExtractBoundingBoxes(include_shape_types=['rectangle'])

        try:
            train_dataset = ProjectVisionDataset(client, project_id=PROJECT_ID,
                                                 include_subsets=['Train'],
                                                 target_transform=target_transform,
                                                 label_name_to_index=label_map_for_cvat)
        except KeyError as e:
            print("\n" + "#"*80)
            print("###                   CRITICAL ERROR FOUND                   ###")
            print("#"*80)
            print("\nThe error is a 'KeyError', which means a label was found in one of your tasks")
            print("but was missing from the official label list for the project.")
            print(f"\nThe missing key is: {e}")
            print("\nThis is usually caused by an inconsistency in your CVAT dataset.")
            print("CHECK THE FOLLOWING:")
            print("1. Did you change labels at the project level after some of the tasks were created?")
            print("2. Is there a difference in casing (e.g. 'car' vs 'Car')?")
            print("\nRecommended fix: review the labels in the CVAT project and all of its tasks to ensure consistency.")
            print("#"*80)
            sys.exit(1)
        except Exception as e:
            print(f"En uventet feil oppstod under lasting av datasett: {e}")
            sys.exit(1)

    # --- THE CODE THAT WAS MISSING IS NOW BACK ---
    def collate_fn(batch):
        pixel_values = [item[0] for item in batch]
        targets_from_cvat = [item[1] for item in batch]
        coco_formatted_annotations = []
        for i, target in enumerate(targets_from_cvat):
            annotations_for_image = []
            for box, label in zip(target['boxes'], target['labels']):
                # CVAT boxes are [x_min, y_min, x_max, y_max]; COCO wants [x, y, width, height].
                x_min, y_min, x_max, y_max = box.tolist()
                width, height = x_max - x_min, y_max - y_min
                annotations_for_image.append({
                    "bbox": [x_min, y_min, width, height],
                    "category_id": label.item(),
                    "area": width * height,
                    "iscrowd": 0,
                    "image_id": i,
                })
            coco_formatted_annotations.append({"image_id": i, "annotations": annotations_for_image})
        encoding = image_processor(images=pixel_values, annotations=coco_formatted_annotations, return_tensors="pt")
        labels = encoding.pop("labels")
        return encoding, labels

    train_loader = DataLoader(train_dataset, batch_size=hyperparameters['BATCH_SIZE'],
                              shuffle=True, collate_fn=collate_fn)

    print(f"\nLaster forhåndstrent modell...")
    model = RTDetrForObjectDetection.from_pretrained(
        hyperparameters['MODEL_CHECKPOINT'],
        id2label=id2label_normalized,
        label2id=label2id_normalized,
        ignore_mismatched_sizes=True
    )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    optimizer = AdamW(model.parameters(), lr=hyperparameters['LEARNING_RATE'])
    logger = task.get_logger()

    print(f"\nStarter trening for {hyperparameters['NUM_EPOCHS']} epoker...")
    for epoch in range(hyperparameters['NUM_EPOCHS']):
        model.train(); total_loss = 0
        for batch_idx, (encoding, labels) in enumerate(train_loader):
            pixel_values = encoding["pixel_values"].to(device)
            labels = [{k: v.to(device) for k, v in t.items()} for t in labels]
            outputs = model(pixel_values=pixel_values, labels=labels)
            loss = outputs.loss
            optimizer.zero_grad(); loss.backward(); optimizer.step()
            total_loss += loss.item()
        avg_train_loss = total_loss / len(train_loader)
        print(f"Epoke {epoch+1}/{hyperparameters['NUM_EPOCHS']}, Loss: {avg_train_loss:.4f}")
        logger.report_scalar(title="Loss", series="Training", value=avg_train_loss, iteration=epoch+1)

    output_path = "./rt_detr_finetuned_model"
    os.makedirs(output_path, exist_ok=True)

    onnx_simplified_path = export_to_onnx_robust(model.to('cpu'), image_processor, output_path, device='cpu')
    if not onnx_simplified_path:
        print("\nONNX export failed. Cannot register the model in ClearML. Exiting.")
        task.close()
        sys.exit(1)

    print("\nRegistrerer trente modeller i ClearML...")
    output_model = OutputModel(task=task, name="CVAT RT-DETR Model")
    output_model.update_weights(weights_filename=onnx_simplified_path)
    print(f":white_check_mark: ONNX-modell registrert som Output Model med navnet: '{output_model.name}'")

    task.upload_artifact(name="HuggingFace RT-DETR Model Files", artifact_object=output_path)
    print(":white_check_mark: Lastet opp HuggingFace-mappen som en generell artefakt.")

    print("\nVenter på at alle filer skal bli lastet opp til ClearML-serveren...")
    task.flush(wait_for_uploads=True)
    print("Alle opplastinger er fullført.")

    task.close()
    print("\nTrening fullført og modeller registrert i ClearML!")

if __name__ == "__main__":
    train_model()
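
To sanity-check the exported file, I also use a quick script like this (a minimal sketch: it assumes onnxruntime is installed and that the graph input is named "pixel_values", as declared in RTDetrOnnxConfig above; the 640x640 size comes from the hyperparameters):

import numpy as np
import onnxruntime as ort

# Load the simplified model written by export_to_onnx_robust().
session = ort.InferenceSession("./rt_detr_finetuned_model/model-simplified.onnx")
print("Inputs:", [(i.name, i.shape) for i in session.get_inputs()])

# Dummy batch at the 640x640 training resolution (the dynamic axes also allow other sizes).
dummy = np.random.rand(1, 3, 640, 640).astype(np.float32)
outputs = session.run(None, {"pixel_values": dummy})
for meta, out in zip(session.get_outputs(), outputs):
    print(meta.name, out.shape)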
  
  