Commit d0c1a28

style: format code with Autopep8, Black, isort, Prettier, StandardJS and Yapf

This commit fixes the style issues introduced in d7c0184 according to the output
from Autopep8, Black, isort, Prettier, StandardJS and Yapf.

Details: None
deepsource-autofix[bot] authored Jan 12, 2025
1 parent d7c0184 commit d0c1a28
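
The formatting itself should be reproducible. Assuming default configurations (the bot's exact tool versions and settings are not recorded in this commit), running the same formatters locally from the repository root would look roughly like:

    autopep8 --in-place --recursive .   # PEP 8 autofixes
    isort .                             # sort and group imports
    black .                             # opinionated Python formatting
    yapf --in-place --recursive .       # additional Python reformatting
    prettier --write .                  # JS/JSON/Markdown formatting
    standard --fix                      # StandardJS autofixes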
Showing 9 changed files with 218 additions and 218 deletions.
3 changes: 2 additions & 1 deletion tests/imageid/imageid.py
@@ -14,7 +14,8 @@
 detector = ObjectDetection()
 detector.setModelTypeAsRetinaNet()
 detector.setModelPath(
-    os.path.join(execution_path, "./testing/resnet50_coco_best_v2.0.1.h5"))
+    os.path.join(execution_path, "./testing/resnet50_coco_best_v2.0.1.h5")
+)
 detector.loadModel()
 custom_objects = detector.CustomObjects(
     person=True,
4 changes: 3 additions & 1 deletion tests/imageid/test.py
@@ -4,6 +4,7 @@
 argument -- description
 Return: return_description
 """
+
 import os

 from imageai.Detection import ObjectDetection
@@ -13,7 +14,8 @@
 detector = ObjectDetection()
 detector.setModelTypeAsRetinaNet()
 detector.setModelPath(
-    os.path.join(execution_path, "./testing/resnet50_coco_best_v2.0.1.h5"))
+    os.path.join(execution_path, "./testing/resnet50_coco_best_v2.0.1.h5")
+)
 detector.loadModel()
 custom_objects = detector.CustomObjects(
     person=True,
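
Both hunks in these two test files are standard Black behavior: it inserts exactly one blank line after a module docstring, and when a call has to wrap it dedents the closing bracket onto its own line instead of letting it trail the last argument (the previous, yapf-style layout). A minimal sketch of the bracket rule, assuming Black's default 88-character line length:

    # Fits in 88 columns: stays on one line
    detector.loadModel()

    # Does not fit: Black wraps the argument and dedents the closing bracket
    detector.setModelPath(
        os.path.join(execution_path, "./testing/resnet50_coco_best_v2.0.1.h5")
    )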
11 changes: 5 additions & 6 deletions tests/open_images/0.py
@@ -1,8 +1,8 @@
 info = data_other.iloc[data_idx]  # getting the info of the index
 img = cv2.imread(f'./Img/{info["Path"]}')  # reading the img
-height, width = cv2.imread(
-    "./Img/" +
-    info["Path"]).shape[:2]  # getting the height and width of the image
+height, width = cv2.imread("./Img/" + info["Path"]).shape[
+    :2
+]  # getting the height and width of the image
 xmin, ymin, xmax, ymax = (
     info["XMin"],
     info["YMin"],
@@ -19,6 +19,5 @@
 w = xmax - xmin
 h = ymax - ymin
 x, y, w, h = round(x), round(y), round(w), round(h)
-roi = img[y:y + h, x:x + w]  # crop the image
-cv2.rectangle(img, (x, y), (x + w, y + h), (200, 0, 0),
-              10)  # draw box around the bbox
+roi = img[y : y + h, x : x + w]  # crop the image
+cv2.rectangle(img, (x, y), (x + w, y + h), (200, 0, 0), 10)  # draw box around the bbox
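
Two different Black rules meet in this file. An over-long subscript is split after its opening bracket (the .shape[:2] rewrite), and slices follow PEP 8's spacing rule: when a slice bound is a compound expression such as y + h, the colon is treated like a binary operator and gets a space on each side. A sketch of the slice rule:

    roi = img[y:y2, x:x2]            # simple names: no spaces around the colon
    roi = img[y : y + h, x : x + w]  # compound bounds: a space on both sides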
14 changes: 8 additions & 6 deletions tests/open_images/find_card_v1.py
@@ -21,7 +21,8 @@
 sat = hsv[:, :, 1]  # Convert the hsv Image to GrayScale
 cv2.imwrite("sat.png", sat)  # Saving the Sat
 thresh = cv2.threshold(sat, 15, 255, cv2.THRESH_BINARY)[
-    1]  # The Outline of the Sat which is the GrayScale Img
+    1
+]  # The Outline of the Sat which is the GrayScale Img
 cv2.imwrite("thresh.png", thresh)  # Saving the thresh
 thresh = 255 - thresh  # Converts Black to White and White to Black
 cv2.imwrite("thresh-2.png", thresh)  # Saving the thresh
@@ -42,19 +43,20 @@
     im, cv2.COLOR_BGR2GRAY
 )  # Conver the black and white image to a image like https://i.stack.imgur.com/UPOZC.png
 cv2.imwrite("gray.png", gray)  # Saving the gray image
-contours, _ = cv2.findContours(
-    gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]  # Finding the boxes
+contours, _ = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[
+    -2:
+]  # Finding the boxes

 for cnt in contours:
     x, y, w, h = cv2.boundingRect(cnt)  # Convert cnt to x,y,w,h
     if (
-            w > 250 and h > 250
+        w > 250 and h > 250
     ):  # Checking if the h and w of the image is higher than 175 so this will only get the card
         idx += 1
         cv2.rectangle(img, (x, y), (x + w, y + h), (200, 0, 0))
         img = cv2.imread(f"./Imgs/{file}")  # get the original image
         img = cv2.resize(img, (2000, 2000))
-        crop_img = img[y:y + h, x:x + w]  # Cropping
+        crop_img = img[y : y + h, x : x + w]  # Cropping
         if f"./Imgs/{file}" in list(data):
             data[f"{file}"]["X"].append(x)
             data[f"{file}"]["Y"].append(y)
@@ -80,5 +82,5 @@
         f"./Preds/{file}.jpeg",
         cv2.rectangle(img, (X, Y), (X + W, Y + H), (200, 0, 0)),
     )  # Saving the corped image
-    crop_img = img[Y:Y + H, X:X + W]  # Cropping
+    crop_img = img[Y : Y + H, X : X + W]  # Cropping
     cv2.imwrite(f"./Imgs/{key}", crop_img)  # Saving the corped image
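
Beyond the re-wrapping, the [-2:] subscript that Black reflows here is a compatibility idiom worth noting: cv2.findContours returns three values (image, contours, hierarchy) in OpenCV 3.x but only two (contours, hierarchy) in OpenCV 4.x, so keeping the last two elements works on both. A sketch of the idiom:

    # Portable across OpenCV 3.x and 4.x: keep only the last two return values
    contours, hierarchy = cv2.findContours(
        gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE
    )[-2:]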
23 changes: 10 additions & 13 deletions tests/pytorch-lightning/Learning-PyTorch-Lightning-main/0.py
@@ -46,25 +46,22 @@ def training_step(self, batch, batch_idx):
     @staticmethod
     def train_dataloader():
         train_dataset = torchvision.datasets.MNIST(
-            root="./data",
-            train=True,
-            transform=transforms.ToTensor(),
-            download=True)
-        train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
-                                                   batch_size=batch_size,
-                                                   num_workers=4,
-                                                   shuffle=False)
+            root="./data", train=True, transform=transforms.ToTensor(), download=True
+        )
+        train_loader = torch.utils.data.DataLoader(
+            dataset=train_dataset, batch_size=batch_size, num_workers=4, shuffle=False
+        )
         return train_loader

     @staticmethod
     def val_dataloader():
         test_dataset = torchvision.datasets.MNIST(
-            root="./data", train=False, transform=transforms.ToTensor())
+            root="./data", train=False, transform=transforms.ToTensor()
+        )

-        test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
-                                                  batch_size=batch_size,
-                                                  num_workers=4,
-                                                  shuffle=False)
+        test_loader = torch.utils.data.DataLoader(
+            dataset=test_dataset, batch_size=batch_size, num_workers=4, shuffle=False
+        )
         return test_loader

     def validation_step(self, batch, batch_idx):
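
Black collapsed the MNIST and DataLoader calls because each joined argument list fits within its default 88-character limit; yapf had been aligning arguments under the opening parenthesis. If one argument per line were preferred, Black's "magic trailing comma" preserves the exploded layout. A sketch, assuming Black defaults:

    # No trailing comma: Black joins the arguments when they fit on one line
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset, batch_size=batch_size, num_workers=4, shuffle=False
    )

    # Trailing comma after the last argument: Black keeps one argument per line
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        num_workers=4,
        shuffle=False,
    )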
1 change: 1 addition & 0 deletions tests/tflite/test.py
@@ -4,6 +4,7 @@
 argument -- description
 Return: return_description
 """
+
 import datetime
 import os

29 changes: 16 additions & 13 deletions tests/tflite/tflite.py
@@ -8,8 +8,7 @@
 from tflite_model_maker import model_spec, object_detector
 from tflite_model_maker.config import ExportFormat, QuantizationConfig

-print("Num GPUs Available: ",
-      len(tf.config.experimental.list_physical_devices("GPU")))
+print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices("GPU")))


 class TFLite:
@@ -29,8 +28,7 @@ def __init__(
         train_whole_model: bool = True,
         export_dir: str = ".",
         model_path: str = "model.tflite",
-        data_loading_csv:
-        str = "gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv",
+        data_loading_csv: str = "gs://cloud-ml-data/img/openimage/csv/salads_ml_use.csv",
     ) -> None:
         """sumary_line
         Keyword arguments:
@@ -43,8 +41,7 @@ def __init__(
         self.train_whole_model = train_whole_model
         self.export_dir = export_dir
         self.data_loading_csv = data_loading_csv
-        self.log_dir = "logs/fit/" + datetime.datetime.now().strftime(
-            "%Y%m%d-%H%M%S")
+        self.log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
         self.epochs = epochs
         self.threshold = 0.125
         self.model_path = model_path
@@ -136,21 +133,27 @@ def create_test_image(self, image_path):
         """
         with tf.device("/GPU:0"):
             print("Creating test images")
-            _, input_height, input_width, _ = self.interpreter.get_input_details(
-            )[0]["shape"]
+            _, input_height, input_width, _ = self.interpreter.get_input_details()[0][
+                "shape"
+            ]
             # Read image in tf encoded format
             img = tf.io.read_file(image_path)
             img = tf.io.decode_image(
-                img, channels=3)  # Decode the image (Load the image)
+                img, channels=3
+            )  # Decode the image (Load the image)
             img = tf.image.convert_image_dtype(
-                img, tf.uint8)  # Convert to Data Type Unit8
+                img, tf.uint8
+            )  # Convert to Data Type Unit8
             self.original_image = img
             self.resized_img = tf.image.resize(
-                img, (input_height, input_width))  # Resize Image
+                img, (input_height, input_width)
+            )  # Resize Image
             self.resized_img = self.resized_img[
-                tf.newaxis, :]  # Add 1 dimension to the image
+                tf.newaxis, :
+            ]  # Add 1 dimension to the image
             self.resized_img = tf.cast(
-                self.resized_img, dtype=tf.uint8)  # Convert to Data Type Unit8
+                self.resized_img, dtype=tf.uint8
+            )  # Convert to Data Type Unit8

     def predict_test_image(self):
         """sumary_line
11 changes: 5 additions & 6 deletions tests/torch_object_detection/data_loader.py
@@ -68,13 +68,13 @@ def __getitem__(self, idx):
         # convert everything into a torch.Tensor
         boxes = torch.as_tensor(boxes, dtype=torch.float32)
         # there is only one class
-        labels = torch.ones((num_objs, ), dtype=torch.int64)
+        labels = torch.ones((num_objs,), dtype=torch.int64)
         masks = torch.as_tensor(masks, dtype=torch.uint8)

         image_id = torch.tensor([idx])
         area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
         # suppose all instances are not crowd
-        iscrowd = torch.zeros((num_objs, ), dtype=torch.int64)
+        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

         target = {
             "boxes": boxes,
@@ -95,7 +95,6 @@ def __len__(self):
         return len(self.imgs)


-dl = torch.utils.data.DataLoader(PennFudanDataset(),
-                                 batch_size=32,
-                                 shuffle=True,
-                                 num_workers=2)
+dl = torch.utils.data.DataLoader(
+    PennFudanDataset(), batch_size=32, shuffle=True, num_workers=2
+)
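
The tuple edits in this file are cosmetic but easy to misread: Black rewrites (num_objs, ) as (num_objs,), dropping the space while keeping the comma, because the trailing comma is what makes it a one-element tuple; (num_objs) on its own is just a parenthesized integer. For example:

    shape = (num_objs,)                                  # a 1-tuple, usable as a tensor shape
    labels = torch.ones((num_objs,), dtype=torch.int64)  # one label per object instance
    not_a_tuple = (num_objs)                             # just an int in parentheses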