diff --git a/invokeai/app/invocations/image.py b/invokeai/app/invocations/image.py
index f0e3727d56f..8df5760c041 100644
--- a/invokeai/app/invocations/image.py
+++ b/invokeai/app/invocations/image.py
@@ -1074,8 +1074,6 @@ def invoke(self, context: InvocationContext) -> CanvasV2MaskAndCropOutput:
         )
 
 
-
-
 @invocation_output("crop_to_object_output")
 class CropToObjectOutput(ImageOutput):
     offset_top: int = OutputField(description="The number of pixels cropped from the top")
@@ -1096,9 +1094,8 @@ class CropToObjectInvocation(BaseInvocation, WithMetadata, WithBoard):
 
     image: ImageField = InputField(description="An input mask image with black and white content")
     margin: int = InputField(default=0, ge=0, description="The desired margin around the object, as measured in pixels")
-    object_color: Literal['white', 'black'] = InputField(
-        default='white',
-        description="The color of the object to crop around (either 'white' or 'black')"
+    object_color: Literal["white", "black"] = InputField(
+        default="white", description="The color of the object to crop around (either 'white' or 'black')"
     )
 
     def invoke(self, context: InvocationContext) -> CropToObjectOutput:
@@ -1112,7 +1109,7 @@ def invoke(self, context: InvocationContext) -> CropToObjectOutput:
         np_image = numpy.array(grayscale_image)
 
         # Depending on the object color, find the object pixels
-        if self.object_color == 'white':
+        if self.object_color == "white":
             # Find white pixels (value > 0)
             object_pixels = numpy.argwhere(np_image > 0)
         else:
@@ -1162,4 +1159,4 @@ def invoke(self, context: InvocationContext) -> CropToObjectOutput:
             offset_left=offset_left,
             offset_right=offset_right,
             offset_bottom=offset_bottom,
-        )
\ No newline at end of file
+        )
diff --git a/invokeai/app/invocations/mask.py b/invokeai/app/invocations/mask.py
index d48eb771f6b..123af32188d 100644
--- a/invokeai/app/invocations/mask.py
+++ b/invokeai/app/invocations/mask.py
@@ -108,15 +108,14 @@ class ImageMaskToTensorInvocation(BaseInvocation, WithMetadata):
     def invoke(self, context: InvocationContext) -> MaskOutput:
         image = context.images.get_pil(self.image.image_name)
         np_image = np.array(image)
-
         # Handle different image modes
-        if image.mode == 'RGBA':
+        if image.mode == "RGBA":
             alpha_channel = np_image[:, :, 3]  # Extract alpha channel
-        elif image.mode == 'RGB':
+        elif image.mode == "RGB":
             # For RGB images, treat all non-black pixels as opaque.
             non_black_mask = np.any(np_image > 0, axis=2)  # True for any non-black pixels
             alpha_channel = non_black_mask.astype(np.uint8) * 255  # Convert to a mask of 0 or 255
-        elif image.mode == 'L':  # Grayscale images
+        elif image.mode == "L":  # Grayscale images
             alpha_channel = np_image  # Grayscale image, so we directly use it
         else:
             raise ValueError(f"Unsupported image mode: {image.mode}")
@@ -135,8 +134,6 @@ def invoke(self, context: InvocationContext) -> MaskOutput:
         )
 
 
-
-
 @invocation(
     "tensor_mask_to_image",
     title="Tensor Mask to Image",
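
Not part of the patch above: a minimal standalone sketch of the bounding-box-with-margin logic that the new CropToObjectInvocation applies, for reviewers who want to sanity-check the numpy.argwhere approach outside of InvokeAI. The helper name find_crop_box and the demo image are hypothetical, and the clamping and empty-mask fallback shown here are assumptions rather than a copy of the node's code.

# --- Reviewer sketch (not part of this diff) ---------------------------------
# Minimal, standalone illustration of the bounding-box-with-margin computation
# that CropToObjectInvocation performs. find_crop_box is a hypothetical helper;
# the clamping and empty-mask fallback are assumptions, not the node's code.
import numpy
from PIL import Image


def find_crop_box(mask: Image.Image, object_color: str = "white", margin: int = 0) -> tuple[int, int, int, int]:
    """Return a (left, top, right, bottom) box around the white or black object in a mask."""
    np_image = numpy.array(mask.convert("L"))

    # Select the object pixels, mirroring the object_color handling in the diff.
    if object_color == "white":
        object_pixels = numpy.argwhere(np_image > 0)  # any non-black pixel
    else:
        object_pixels = numpy.argwhere(np_image < 255)  # any non-white pixel

    if object_pixels.size == 0:
        return (0, 0, mask.width, mask.height)  # no object found: keep the full image

    # argwhere yields (row, col) pairs, i.e. (y, x).
    top, left = object_pixels.min(axis=0)
    bottom, right = object_pixels.max(axis=0)

    # Expand by the margin and clamp to the image bounds.
    left = max(int(left) - margin, 0)
    top = max(int(top) - margin, 0)
    right = min(int(right) + margin + 1, mask.width)
    bottom = min(int(bottom) + margin + 1, mask.height)
    return (left, top, right, bottom)


if __name__ == "__main__":
    demo = Image.new("L", (64, 64), 0)
    demo.paste(255, (20, 24, 40, 48))  # white rectangle as the "object"
    print(find_crop_box(demo, "white", margin=4))  # -> (16, 20, 44, 52)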