diff --git a/src/index.ts b/src/index.ts
index f0e3148..49659ba 100644
--- a/src/index.ts
+++ b/src/index.ts
@@ -1,8 +1,8 @@
-import { PathLike } from "fs";
-import fs from "fs/promises";
-import path from "path";
+import { mkdir, writeFile } from "fs/promises";
+import * as path from "path";
 import sharp from "sharp";
 
+// Supported image formats
 const SUPPORTED_FORMATS = [
   ".png",
   ".jpg",
@@ -13,6 +13,7 @@ const SUPPORTED_FORMATS = [
   ".tiff",
 ];
 
+// Orientation types
 export enum Orientation {
   TOP = 1,
   LEFT = 2,
@@ -20,6 +21,7 @@
   RIGHT = 4,
 }
 
+// Interface for library options
 export interface SpeechBubbleOptions {
   mirror?: boolean;
   orientation?: Orientation;
@@ -85,44 +87,44 @@ export class ImageSpeechBubbleTransformer {
     inputBuffer: Buffer,
     options: SpeechBubbleOptions = {}
   ): Promise<Buffer> {
+    // Validate input
     if (!inputBuffer) {
       throw new Error("Input buffer is required");
     }
 
+    // Use default speech bubble path if not provided
     const speechBubblePath =
       options.speechBubblePath || this.defaultSpeechBubblePath;
 
+    // Check file formats
     this.checkFileFormat(speechBubblePath);
 
-    const speechBubbleBuffer = await sharp(speechBubblePath).toBuffer();
+    // Get input image metadata
+    const inputMetadata = await sharp(inputBuffer).metadata();
 
+    // Read and resize speech bubble to match input image
+    const speechBubbleBuffer = await sharp(speechBubblePath)
+      .resize({
+        width: inputMetadata.width,
+        height: inputMetadata.height,
+        fit: sharp.fit.cover,
+        position: sharp.strategy.entropy,
+      })
+      .toBuffer();
+
+    // Process main image
     const processedImage = await sharp(inputBuffer)
-      .ensureAlpha()
-      .metadata()
-      .then(async (metadata) => {
-        // match image size
-        const resizedSpeechBubble = await sharp(speechBubbleBuffer)
-          .resize(metadata.width, metadata.height)
-          .toBuffer();
-
-        const transformedSpeechBubble = await this.transformImage(
-          resizedSpeechBubble,
-          {
+      .ensureAlpha() // Ensure alpha channel
+      .composite([
+        {
+          input: await this.transformImage(speechBubbleBuffer, {
            mirror: options.mirror,
            orientation: options.orientation,
-          }
-        );
-
-        // subtract speech bubble
-        return sharp(inputBuffer)
-          .composite([
-            {
-              input: transformedSpeechBubble,
-              blend: "difference",
-            },
-          ])
-          .toBuffer();
-      });
+          }),
+          blend: "xor",
+        },
+      ])
+      .toBuffer();
 
     return processedImage;
   }
@@ -132,10 +134,11 @@ export class ImageSpeechBubbleTransformer {
    */
   async processAndSave(
     inputBuffer: Buffer,
-    outputPath: PathLike,
+    outputPath: string,
     options: SpeechBubbleOptions = {}
   ): Promise<void> {
-    this.checkFileFormat(outputPath.toString());
+    // Check output file format
+    this.checkFileFormat(outputPath);
 
     // Process image
     const processedBuffer = await this.processSpeechBubble(
@@ -143,13 +146,17 @@
       inputBuffer,
       options
     );
-    const outputDir = path.dirname(outputPath.toString());
-    await fs.mkdir(outputDir, { recursive: true });
+    // Ensure output directory exists
+    const outputDir = path.dirname(outputPath);
+    await mkdir(outputDir, { recursive: true });
 
-    await sharp(processedBuffer).toFile(outputPath.toString());
+    // Save processed image
+    await writeFile(outputPath, processedBuffer);
+    console.log(`Image saved at ${path.resolve(outputPath)}`);
   }
 }
 
+// Convenience factory
 export function createSpeechBubbleTransformer(assetsDir?: string) {
   return new ImageSpeechBubbleTransformer(assetsDir);
 }
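
For context, here is a minimal usage sketch of the API after this change. The import path and the input/output file names are placeholders; createSpeechBubbleTransformer, processAndSave, SpeechBubbleOptions, and Orientation come from the diff above.

import { readFile } from "fs/promises";
// Placeholder import path; adjust to how the package is actually consumed.
import { createSpeechBubbleTransformer, Orientation } from "./src/index";

async function main(): Promise<void> {
  // Hypothetical input image read into a Buffer.
  const inputBuffer = await readFile("./input.png");

  // Uses the transformer's default speech bubble asset.
  const transformer = createSpeechBubbleTransformer();

  // Composite the speech bubble (mirrored, top orientation) onto the image
  // with the new "xor" blend, then write the result to disk.
  await transformer.processAndSave(inputBuffer, "./output.png", {
    mirror: true,
    orientation: Orientation.TOP,
  });
}

main().catch(console.error);

Note that with outputPath now typed as string and the result written via writeFile, the processed buffer is saved directly without a second pass through sharp.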