Compare commits
2 commits: 3d14178546...44c0f27d37

Author | SHA1 | Date
---|---|---
 | 44c0f27d37 |
 | 65603ca443 |

2 changed files with 41 additions and 34 deletions

package.json

@@ -1,6 +1,6 @@
 {
   "name": "image-speech-bubble-transformer",
-  "version": "1.0.3",
+  "version": "1.1.0",
   "description": "TypeScript library for applying speech bubble effects to images",
   "main": "dist/index.js",
   "types": "dist/index.d.ts",

src/index.ts (65)

@@ -1,8 +1,8 @@
-import { PathLike } from "fs";
-import fs from "fs/promises";
-import path from "path";
+import { mkdir, writeFile } from "fs/promises";
+import * as path from "path";
 import sharp from "sharp";
 
+// Supported image formats
 const SUPPORTED_FORMATS = [
   ".png",
   ".jpg",
@@ -13,6 +13,7 @@ const SUPPORTED_FORMATS = [
   ".tiff",
 ];
 
+// Orientation types
 export enum Orientation {
   TOP = 1,
   LEFT = 2,
@@ -20,6 +21,7 @@ export enum Orientation {
   RIGHT = 4,
 }
 
+// Interface for library options
 export interface SpeechBubbleOptions {
   mirror?: boolean;
   orientation?: Orientation;
@@ -85,44 +87,44 @@ export class ImageSpeechBubbleTransformer {
     inputBuffer: Buffer,
     options: SpeechBubbleOptions = {}
   ): Promise<Buffer> {
+    // Validate input
     if (!inputBuffer) {
       throw new Error("Input buffer is required");
     }
 
+    // Use default speech bubble path if not provided
     const speechBubblePath =
       options.speechBubblePath || this.defaultSpeechBubblePath;
 
+    // Check file formats
     this.checkFileFormat(speechBubblePath);
 
-    const speechBubbleBuffer = await sharp(speechBubblePath).toBuffer();
-
-    const processedImage = await sharp(inputBuffer)
-      .ensureAlpha()
-      .metadata()
-      .then(async (metadata) => {
-        // match image size
-        const resizedSpeechBubble = await sharp(speechBubbleBuffer)
-          .resize(metadata.width, metadata.height)
-          .toBuffer();
-
-        const transformedSpeechBubble = await this.transformImage(
-          resizedSpeechBubble,
-          {
-            mirror: options.mirror,
-            orientation: options.orientation,
-          }
-        );
-
-        // subtract speech bubble
-        return sharp(inputBuffer)
-          .composite([
-            {
-              input: transformedSpeechBubble,
-              blend: "difference",
-            },
-          ])
-          .toBuffer();
-      });
+    // Get input image metadata
+    const inputMetadata = await sharp(inputBuffer).metadata();
+
+    // Read and resize speech bubble to match input image
+    const speechBubbleBuffer = await sharp(speechBubblePath)
+      .resize({
+        width: inputMetadata.width,
+        height: inputMetadata.height,
+        fit: sharp.fit.cover,
+        position: sharp.strategy.entropy,
+      })
+      .toBuffer();
+
+    // Process main image
+    const processedImage = await sharp(inputBuffer)
+      .ensureAlpha() // Ensure alpha channel
+      .composite([
+        {
+          input: await this.transformImage(speechBubbleBuffer, {
+            mirror: options.mirror,
+            orientation: options.orientation,
+          }),
+          blend: "xor",
+        },
+      ])
+      .toBuffer();
 
     return processedImage;
   }
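
For reference, the reworked processSpeechBubble body boils down to a metadata read, a cover-fit resize of the overlay, and an "xor" composite. The sketch below is a minimal standalone version of that sharp pipeline; it omits the class's transformImage mirroring/orientation step, and the inputPath/bubblePath arguments are hypothetical file paths, not part of the library's API.

```ts
import sharp from "sharp";

// Minimal sketch: size the overlay to the input image, then cut its
// shape out of the input with an "xor" composite.
async function applyBubble(inputPath: string, bubblePath: string): Promise<Buffer> {
  // Input dimensions drive the overlay size
  const { width, height } = await sharp(inputPath).metadata();

  // Cover-fit resize; entropy positioning chooses the crop window
  const bubble = await sharp(bubblePath)
    .resize({ width, height, fit: sharp.fit.cover, position: sharp.strategy.entropy })
    .toBuffer();

  // "xor" keeps only regions where exactly one layer is opaque,
  // so the overlapping bubble area is punched out of the image
  return sharp(inputPath)
    .ensureAlpha()
    .composite([{ input: bubble, blend: "xor" }])
    .toBuffer();
}
```
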
@@ -132,10 +134,11 @@ export class ImageSpeechBubbleTransformer {
    */
   async processAndSave(
     inputBuffer: Buffer,
-    outputPath: PathLike,
+    outputPath: string,
     options: SpeechBubbleOptions = {}
   ): Promise<void> {
-    this.checkFileFormat(outputPath.toString());
+    // Check output file format
+    this.checkFileFormat(outputPath);
 
     // Process image
     const processedBuffer = await this.processSpeechBubble(
@@ -143,13 +146,17 @@ export class ImageSpeechBubbleTransformer {
       options
     );
 
-    const outputDir = path.dirname(outputPath.toString());
-    await fs.mkdir(outputDir, { recursive: true });
+    // Ensure output directory exists
+    const outputDir = path.dirname(outputPath);
+    await mkdir(outputDir, { recursive: true });
 
-    await sharp(processedBuffer).toFile(outputPath.toString());
+    // Save processed image
+    await writeFile(outputPath, processedBuffer);
+    console.log(`Image saved at ${path.resolve(outputPath)}`);
   }
 }
 
+// Example usage
 export function createSpeechBubbleTransformer(assetsDir?: string) {
   return new ImageSpeechBubbleTransformer(assetsDir);
 }
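
With this change, processAndSave accepts a plain string output path and writes via fs/promises instead of sharp's toFile. A minimal usage sketch, assuming the package exports match the names in this diff; the import specifier and the file paths below are illustrative only.

```ts
import { readFile } from "fs/promises";
import {
  createSpeechBubbleTransformer,
  Orientation,
} from "image-speech-bubble-transformer";

async function main(): Promise<void> {
  // Factory from the diff; the assets directory argument is optional
  const transformer = createSpeechBubbleTransformer();

  // Hypothetical input image
  const input = await readFile("photos/cat.png");

  // Processes the buffer and writes the result,
  // creating the output directory if needed
  await transformer.processAndSave(input, "out/cat-bubble.png", {
    mirror: true,
    orientation: Orientation.TOP,
  });
}

main().catch(console.error);
```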