Compare commits

...

2 commits

Author  SHA1        Message                Date
jolts   44c0f27d37  1.1.0                  2025-01-25 07:53:03 +02:00
        (Some checks failed: CI / build-and-publish (push) has been cancelled)
jolts   65603ca443  fix: finally made it   2025-01-25 07:52:55 +02:00
2 changed files with 41 additions and 34 deletions

package.json  View file

@@ -1,6 +1,6 @@
 {
   "name": "image-speech-bubble-transformer",
-  "version": "1.0.3",
+  "version": "1.1.0",
   "description": "TypeScript library for applying speech bubble effects to images",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",

View file

@@ -1,8 +1,8 @@
-import { PathLike } from "fs";
-import fs from "fs/promises";
-import path from "path";
+import { mkdir, writeFile } from "fs/promises";
+import * as path from "path";
 import sharp from "sharp";
+// Supported image formats
 const SUPPORTED_FORMATS = [
   ".png",
   ".jpg",
@@ -13,6 +13,7 @@ const SUPPORTED_FORMATS = [
   ".tiff",
 ];
+// Orientation types
 export enum Orientation {
   TOP = 1,
   LEFT = 2,
@@ -20,6 +21,7 @@ export enum Orientation {
   RIGHT = 4,
 }
+// Interface for library options
 export interface SpeechBubbleOptions {
   mirror?: boolean;
   orientation?: Orientation;
@@ -85,44 +87,44 @@ export class ImageSpeechBubbleTransformer {
     inputBuffer: Buffer,
     options: SpeechBubbleOptions = {}
   ): Promise<Buffer> {
+    // Validate input
     if (!inputBuffer) {
       throw new Error("Input buffer is required");
     }
+    // Use default speech bubble path if not provided
     const speechBubblePath =
       options.speechBubblePath || this.defaultSpeechBubblePath;
+    // Check file formats
     this.checkFileFormat(speechBubblePath);
-    const speechBubbleBuffer = await sharp(speechBubblePath).toBuffer();
+    // Get input image metadata
+    const inputMetadata = await sharp(inputBuffer).metadata();
+    // Read and resize speech bubble to match input image
+    const speechBubbleBuffer = await sharp(speechBubblePath)
+      .resize({
+        width: inputMetadata.width,
+        height: inputMetadata.height,
+        fit: sharp.fit.cover,
+        position: sharp.strategy.entropy,
+      })
+      .toBuffer();
+    // Process main image
     const processedImage = await sharp(inputBuffer)
-      .ensureAlpha()
-      .metadata()
-      .then(async (metadata) => {
-        // match image size
-        const resizedSpeechBubble = await sharp(speechBubbleBuffer)
-          .resize(metadata.width, metadata.height)
-          .toBuffer();
-        const transformedSpeechBubble = await this.transformImage(
-          resizedSpeechBubble,
-          {
+      .ensureAlpha() // Ensure alpha channel
+      .composite([
+        {
+          input: await this.transformImage(speechBubbleBuffer, {
             mirror: options.mirror,
             orientation: options.orientation,
-          }
-        );
-        // subtract speech bubble
-        return sharp(inputBuffer)
-          .composite([
-            {
-              input: transformedSpeechBubble,
-              blend: "difference",
-            },
-          ])
-          .toBuffer();
-      });
+          }),
+          blend: "xor",
+        },
+      ])
+      .toBuffer();
     return processedImage;
   }
@@ -132,10 +134,11 @@ export class ImageSpeechBubbleTransformer {
    */
   async processAndSave(
     inputBuffer: Buffer,
-    outputPath: PathLike,
+    outputPath: string,
     options: SpeechBubbleOptions = {}
   ): Promise<void> {
-    this.checkFileFormat(outputPath.toString());
+    // Check output file format
+    this.checkFileFormat(outputPath);
     // Process image
     const processedBuffer = await this.processSpeechBubble(
@@ -143,13 +146,17 @@ export class ImageSpeechBubbleTransformer {
       options
     );
-    const outputDir = path.dirname(outputPath.toString());
-    await fs.mkdir(outputDir, { recursive: true });
+    // Ensure output directory exists
+    const outputDir = path.dirname(outputPath);
+    await mkdir(outputDir, { recursive: true });
-    await sharp(processedBuffer).toFile(outputPath.toString());
+    // Save processed image
+    await writeFile(outputPath, processedBuffer);
+    console.log(`Image saved at ${path.resolve(outputPath)}`);
   }
 }
 // Example usage
 export function createSpeechBubbleTransformer(assetsDir?: string) {
   return new ImageSpeechBubbleTransformer(assetsDir);
 }
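
For reference, a minimal usage sketch of the API as it stands after this commit; the input file, output path, and error handling are illustrative, and it assumes the package entry point re-exports createSpeechBubbleTransformer and Orientation:

import { readFile } from "fs/promises";
import {
  createSpeechBubbleTransformer,
  Orientation,
} from "image-speech-bubble-transformer";

async function main() {
  // Any supported input format works (.png, .jpg, .tiff, ...); the path is illustrative
  const inputBuffer = await readFile("input.png");

  // createSpeechBubbleTransformer(assetsDir?) wraps new ImageSpeechBubbleTransformer(assetsDir)
  const transformer = createSpeechBubbleTransformer();

  // processAndSave now takes a plain string path, creates the output directory,
  // and writes the composited buffer with writeFile instead of sharp().toFile()
  await transformer.processAndSave(inputBuffer, "output/result.png", {
    mirror: true,
    orientation: Orientation.TOP,
  });
}

main().catch(console.error);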