-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathTrain_brain-tumors-detection_Yolov10.py
71 lines (57 loc) · 3.06 KB
/
Train_brain-tumors-detection_Yolov10.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
# https://medium.com/@huzeyfebicakci/custom-dataset-training-with-yolov10-a-deep-dive-into-the-latest-evolution-in-real-time-object-ab8c62c6af85
# modified by Alfonso Blanco García
# using dataset:
# https://universe.roboflow.com/test-svk7h/brain-tumors-detection/dataset/2
import os
import cv2
import ultralytics
# instalado con !pip install -q git+https://github.com/THU-MIG/yolov10.git
# https://blog.roboflow.com/yolov10-how-to-train/
# en mi caso:
#(alfonso1) C:\Users\Alfonso Blanco\.conda\envs\alfonso1\Scripts>python pip-script.py install git+https://github.com/THU-MIG/yolov10.git
#from ultralytics import YOLOv10
from ultralytics import YOLO
import torch
class ObjectDetection:
    """Train a YOLOv10 model on the brain-tumors-detection dataset.

    Based on:
    https://medium.com/@huzeyfebicakci/custom-dataset-training-with-yolov10-a-deep-dive-into-the-latest-evolution-in-real-time-object-ab8c62c6af85
    Dataset:
    https://universe.roboflow.com/test-svk7h/brain-tumors-detection/dataset/2
    """

    def __init__(self):
        # Local root directory of the dataset.
        # NOTE(review): this attribute is never read anywhere in this file;
        # training resolves dataset paths through data.yaml instead.
        self.dir = "C:/brain-tumors-detection_yolov10"

    def train(self, runName):
        """Run one full training session and return the ultralytics results.

        Parameters
        ----------
        runName : str
            Intended experiment name. Currently NOT applied: the run is
            stored under 'runs/train/exp' (hardcoded ``name='exp'`` below,
            kept to preserve existing output paths). Switch ``name='exp'``
            to ``name=runName`` to honor it.

        Returns
        -------
        The object returned by ``ultralytics`` ``model.train`` (training
        metrics/results). Fix over the original code, which computed it
        but never returned it.
        """
        # Nano weights, downloaded from
        # https://github.com/THU-MIG/yolov10/releases/tag/v1.1
        # (installed via: pip install git+https://github.com/THU-MIG/yolov10.git,
        # see https://blog.roboflow.com/yolov10-how-to-train/)
        model = YOLO("yolov10n.pt")
        yaml_path = "data.yaml"
        results = model.train(
            data=yaml_path,        # path to the dataset config file
            batch=-1,              # batch size assigned dynamically by ultralytics
            epochs=100,            # number of training epochs
            optimizer='SGD',       # can also be 'Adam', etc.
            lr0=0.01,              # initial learning rate
            lrf=0.1,               # final learning-rate factor
            weight_decay=0.0005,   # weight decay for regularization
            momentum=0.937,        # momentum (SGD-specific)
            verbose=True,          # verbose output
            device='cpu',          # GPU device index (e.g. '0') or 'cpu'
            workers=8,             # number of workers for data loading
            project='runs/train',  # output directory for results
            name='exp',            # experiment name (runName intentionally unused; see docstring)
            exist_ok=False,        # do not overwrite an existing project/name directory
            rect=False,            # rectangular training (speed optimization) disabled
            resume=False,          # NOTE: resuming training on another machine is unreliable
            multi_scale=True,      # use multi-scale training
            single_cls=False,      # keep the dataset's per-class labels
        )
        return results

    @staticmethod
    def train_custom_dataset(runName):
        """Convenience entry point: build a detector and train it."""
        od = ObjectDetection()
        return od.train(runName)
# Example usage — guard the entry point so importing this module does not
# immediately launch a full training run (the original executed on import).
if __name__ == "__main__":
    ObjectDetection.train_custom_dataset('trained_model')